summaryrefslogtreecommitdiffstats
path: root/contrib/restricted/aws/aws-c-sdkutils
diff options
context:
space:
mode:
authordakovalkov <[email protected]>2023-12-03 13:33:55 +0300
committerdakovalkov <[email protected]>2023-12-03 14:04:39 +0300
commit2a718325637e5302334b6d0a6430f63168f8dbb3 (patch)
tree64be81080b7df9ec1d86d053a0c394ae53fcf1fe /contrib/restricted/aws/aws-c-sdkutils
parente0d94a470142d95c3007e9c5d80380994940664a (diff)
Update contrib/libs/aws-sdk-cpp to 1.11.37
Diffstat (limited to 'contrib/restricted/aws/aws-c-sdkutils')
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-arm64.txt33
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-x86_64.txt33
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-aarch64.txt34
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-x86_64.txt34
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CMakeLists.txt19
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CMakeLists.windows-x86_64.txt33
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CODE_OF_CONDUCT.md4
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CONTRIBUTING.md59
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/LICENSE175
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/NOTICE1
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/README.md29
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/aws_profile.h218
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/endpoints_rule_engine.h303
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/exports.h30
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/partitions.h38
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_types_impl.h314
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_util.h136
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/resource_name.h44
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/sdkutils.h51
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c1592
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c1132
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c958
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c639
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c235
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c588
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/partitions.c283
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c108
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c67
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/ya.make43
29 files changed, 7233 insertions, 0 deletions
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-arm64.txt b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-arm64.txt
new file mode 100644
index 00000000000..dae09565201
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,33 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-sdkutils)
+target_compile_options(restricted-aws-aws-c-sdkutils PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-sdkutils PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/include
+)
+target_link_libraries(restricted-aws-aws-c-sdkutils PUBLIC
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-sdkutils PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
+)
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-x86_64.txt b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 00000000000..dae09565201
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,33 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-sdkutils)
+target_compile_options(restricted-aws-aws-c-sdkutils PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-sdkutils PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/include
+)
+target_link_libraries(restricted-aws-aws-c-sdkutils PUBLIC
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-sdkutils PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
+)
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-aarch64.txt b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-aarch64.txt
new file mode 100644
index 00000000000..7aa506361e2
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,34 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-sdkutils)
+target_compile_options(restricted-aws-aws-c-sdkutils PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-sdkutils PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/include
+)
+target_link_libraries(restricted-aws-aws-c-sdkutils PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-sdkutils PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
+)
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-x86_64.txt b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-x86_64.txt
new file mode 100644
index 00000000000..7aa506361e2
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,34 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-sdkutils)
+target_compile_options(restricted-aws-aws-c-sdkutils PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-sdkutils PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/include
+)
+target_link_libraries(restricted-aws-aws-c-sdkutils PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-sdkutils PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
+)
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.txt b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.txt
new file mode 100644
index 00000000000..2dce3a77fe3
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.txt
@@ -0,0 +1,19 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-aarch64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+ include(CMakeLists.darwin-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+ include(CMakeLists.darwin-arm64.txt)
+elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
+ include(CMakeLists.windows-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-x86_64.txt)
+endif()
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.windows-x86_64.txt b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.windows-x86_64.txt
new file mode 100644
index 00000000000..dae09565201
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,33 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-sdkutils)
+target_compile_options(restricted-aws-aws-c-sdkutils PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-sdkutils PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/include
+)
+target_link_libraries(restricted-aws-aws-c-sdkutils PUBLIC
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-sdkutils PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
+)
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CODE_OF_CONDUCT.md b/contrib/restricted/aws/aws-c-sdkutils/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000000..5b627cfa60b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
[email protected] with any additional questions or comments.
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CONTRIBUTING.md b/contrib/restricted/aws/aws-c-sdkutils/CONTRIBUTING.md
new file mode 100644
index 00000000000..c4b6a1c5081
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CONTRIBUTING.md
@@ -0,0 +1,59 @@
+# Contributing Guidelines
+
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+documentation, we greatly value feedback and contributions from our community.
+
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+information to effectively respond to your bug report or contribution.
+
+
+## Reporting Bugs/Feature Requests
+
+We welcome you to use the GitHub issue tracker to report bugs or suggest features.
+
+When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
+reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+* A reproducible test case or series of steps
+* The version of our code being used
+* Any modifications you've made relevant to the bug
+* Anything unusual about your environment or deployment
+
+
+## Contributing via Pull Requests
+Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
+
+1. You are working against the latest source on the *main* branch.
+2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
+3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
+
+To send us a pull request, please:
+
+1. Fork the repository.
+2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
+3. Ensure local tests pass.
+4. Commit to your fork using clear commit messages.
+5. Send us a pull request, answering any default questions in the pull request interface.
+6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
+
+GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+
+## Finding contributions to work on
+Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
+
+
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
[email protected] with any additional questions or comments.
+
+
+## Security issue notifications
+If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
+
+
+## Licensing
+
+See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
diff --git a/contrib/restricted/aws/aws-c-sdkutils/LICENSE b/contrib/restricted/aws/aws-c-sdkutils/LICENSE
new file mode 100644
index 00000000000..67db8588217
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/LICENSE
@@ -0,0 +1,175 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/contrib/restricted/aws/aws-c-sdkutils/NOTICE b/contrib/restricted/aws/aws-c-sdkutils/NOTICE
new file mode 100644
index 00000000000..616fc588945
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/NOTICE
@@ -0,0 +1 @@
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
diff --git a/contrib/restricted/aws/aws-c-sdkutils/README.md b/contrib/restricted/aws/aws-c-sdkutils/README.md
new file mode 100644
index 00000000000..12d391f5be4
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/README.md
@@ -0,0 +1,29 @@
+## AWS C SDKUTILS
+
+C99 library implementing AWS SDK specific utilities. Includes utilities for ARN
+parsing, reading AWS profiles, etc...
+
+## License
+
+This library is licensed under the Apache 2.0 License.
+
+## Usage
+
+### Building
+
+CMake 3.0+ is required to build.
+
+`<install-path>` must be an absolute path in the following instructions.
+
+
+#### Building aws-c-sdkutils
+
+```
+git clone [email protected]:awslabs/aws-c-common.git
+cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-c-common/build --target install
+
+git clone [email protected]:awslabs/aws-c-sdkutils.git
+cmake -S aws-c-sdkutils -B aws-c-sdkutils/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-sdkutils/build --target install
+``` \ No newline at end of file
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/aws_profile.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/aws_profile.h
new file mode 100644
index 00000000000..5a200654cc3
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/aws_profile.h
@@ -0,0 +1,218 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_SDKUTILS_AWS_PROFILE_H
+#define AWS_SDKUTILS_AWS_PROFILE_H
+#include <aws/sdkutils/sdkutils.h>
+
+struct aws_allocator;
+struct aws_string;
+struct aws_byte_buf;
+struct aws_byte_cursor;
+
+/*
+ * A set of data types that model the aws profile specification
+ *
+ * A profile collection is a collection of zero or more named profiles
+ * Each profile is a set of properties (named key-value pairs)
+ * Empty-valued properties may have sub properties (named key-value pairs)
+ *
+ * Resolution rules exist to determine what profile to use, what files to
+ * read profile collections from, and what types of credentials have priority.
+ *
+ * The profile specification is informally defined as "what the aws cli does" and
+ * formally defined in internal aws documents.
+ */
+struct aws_profile_property;
+struct aws_profile;
+struct aws_profile_collection;
+
+/**
+ * The profile specification has rule exceptions based on what file
+ * the profile collection comes from.
+ */
+enum aws_profile_source_type { AWS_PST_NONE, AWS_PST_CONFIG, AWS_PST_CREDENTIALS };
+
+/*
+ * The collection can hold different types of sections.
+ */
+enum aws_profile_section_type {
+ AWS_PROFILE_SECTION_TYPE_PROFILE,
+ AWS_PROFILE_SECTION_TYPE_SSO_SESSION,
+
+ AWS_PROFILE_SECTION_TYPE_COUNT,
+};
+
+AWS_EXTERN_C_BEGIN
+
+/*************************
+ * Profile collection APIs
+ *************************/
+
+/**
+ * Increments the reference count on the profile collection, allowing the caller to take a reference to it.
+ *
+ * Returns the same profile collection passed in.
+ */
+AWS_SDKUTILS_API
+struct aws_profile_collection *aws_profile_collection_acquire(struct aws_profile_collection *collection);
+
+/**
+ * Decrements a profile collection's ref count. When the ref count drops to zero, the collection will be destroyed.
+ * Returns NULL.
+ */
+AWS_SDKUTILS_API
+struct aws_profile_collection *aws_profile_collection_release(struct aws_profile_collection *collection);
+
+/**
+ * @Deprecated This is equivalent to aws_profile_collection_release.
+ */
+AWS_SDKUTILS_API
+void aws_profile_collection_destroy(struct aws_profile_collection *profile_collection);
+
+/**
+ * Create a new profile collection by parsing a file with the specified path
+ */
+AWS_SDKUTILS_API
+struct aws_profile_collection *aws_profile_collection_new_from_file(
+ struct aws_allocator *allocator,
+ const struct aws_string *file_path,
+ enum aws_profile_source_type source);
+
+/**
+ * Create a new profile collection by merging a config-file-based profile
+ * collection and a credentials-file-based profile collection
+ */
+AWS_SDKUTILS_API
+struct aws_profile_collection *aws_profile_collection_new_from_merge(
+ struct aws_allocator *allocator,
+ const struct aws_profile_collection *config_profiles,
+ const struct aws_profile_collection *credentials_profiles);
+
+/**
+ * Create a new profile collection by parsing text in a buffer. Primarily
+ * for testing.
+ */
+AWS_SDKUTILS_API
+struct aws_profile_collection *aws_profile_collection_new_from_buffer(
+ struct aws_allocator *allocator,
+ const struct aws_byte_buf *buffer,
+ enum aws_profile_source_type source);
+
+/**
+ * Retrieves a reference to a profile with the specified name, if it exists, from the profile collection
+ */
+AWS_SDKUTILS_API
+const struct aws_profile *aws_profile_collection_get_profile(
+ const struct aws_profile_collection *profile_collection,
+ const struct aws_string *profile_name);
+
+/*
+ * Retrieves a reference to a section with the specified name and type, if it exists, from the profile collection.
+ * You can get the "default" profile or credentials file sections by passing `AWS_PROFILE_SECTION_TYPE_PROFILE`
+ */
+AWS_SDKUTILS_API
+const struct aws_profile *aws_profile_collection_get_section(
+ const struct aws_profile_collection *profile_collection,
+ const enum aws_profile_section_type section_type,
+ const struct aws_string *section_name);
+
+/**
+ * Returns the number of profiles in a collection
+ */
+AWS_SDKUTILS_API
+size_t aws_profile_collection_get_profile_count(const struct aws_profile_collection *profile_collection);
+
+/**
+ * Returns the number of elements of the specified section in a collection.
+ */
+AWS_SDKUTILS_API
+size_t aws_profile_collection_get_section_count(
+ const struct aws_profile_collection *profile_collection,
+ const enum aws_profile_section_type section_type);
+
+/**
+ * Returns a reference to the name of the provided profile
+ */
+AWS_SDKUTILS_API
+const struct aws_string *aws_profile_get_name(const struct aws_profile *profile);
+
+/**************
+ * profile APIs
+ **************/
+
+/**
+ * Retrieves a reference to a property with the specified name, if it exists, from a profile
+ */
+AWS_SDKUTILS_API
+const struct aws_profile_property *aws_profile_get_property(
+ const struct aws_profile *profile,
+ const struct aws_string *property_name);
+
+/**
+ * Returns how many properties a profile holds
+ */
+AWS_SDKUTILS_API
+size_t aws_profile_get_property_count(const struct aws_profile *profile);
+
+/**
+ * Returns a reference to the property's string value
+ */
+AWS_SDKUTILS_API
+const struct aws_string *aws_profile_property_get_value(const struct aws_profile_property *property);
+
+/***********************
+ * profile property APIs
+ ***********************/
+
+/**
+ * Returns a reference to the value of a sub property with the given name, if it exists, in the property
+ */
+AWS_SDKUTILS_API
+const struct aws_string *aws_profile_property_get_sub_property(
+ const struct aws_profile_property *property,
+ const struct aws_string *sub_property_name);
+
+/**
+ * Returns how many sub properties the property holds
+ */
+AWS_SDKUTILS_API
+size_t aws_profile_property_get_sub_property_count(const struct aws_profile_property *property);
+
+/***********
+ * Misc APIs
+ ***********/
+
+/**
+ * Computes the final platform-specific path for the profile credentials file. Does limited home directory
+ * expansion/resolution.
+ *
+ * override_path, if not null, will be searched first instead of using the standard home directory config path
+ */
+AWS_SDKUTILS_API
+struct aws_string *aws_get_credentials_file_path(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *override_path);
+
+/**
+ * Computes the final platform-specific path for the profile config file. Does limited home directory
+ * expansion/resolution.
+ *
+ * override_path, if not null, will be searched first instead of using the standard home directory config path
+ */
+AWS_SDKUTILS_API
+struct aws_string *aws_get_config_file_path(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *override_path);
+
+/**
+ * Computes the profile to use for credentials lookups based on profile resolution rules
+ */
+AWS_SDKUTILS_API
+struct aws_string *aws_get_profile_name(struct aws_allocator *allocator, const struct aws_byte_cursor *override_name);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_SDKUTILS_AWS_PROFILE_H */
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/endpoints_rule_engine.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/endpoints_rule_engine.h
new file mode 100644
index 00000000000..701ba1bd931
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/endpoints_rule_engine.h
@@ -0,0 +1,303 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_SDKUTILS_ENDPOINTS_RULESET_H
+#define AWS_SDKUTILS_ENDPOINTS_RULESET_H
+
+#include <aws/common/byte_buf.h>
+#include <aws/sdkutils/sdkutils.h>
+
+struct aws_endpoints_ruleset;
+struct aws_partitions_config;
+struct aws_endpoints_parameter;
+struct aws_endpoints_rule_engine;
+struct aws_endpoints_resolved_endpoint;
+struct aws_endpoints_request_context;
+struct aws_hash_table;
+
+enum aws_endpoints_parameter_type { AWS_ENDPOINTS_PARAMETER_STRING, AWS_ENDPOINTS_PARAMETER_BOOLEAN };
+enum aws_endpoints_resolved_endpoint_type { AWS_ENDPOINTS_RESOLVED_ENDPOINT, AWS_ENDPOINTS_RESOLVED_ERROR };
+
+AWS_EXTERN_C_BEGIN
+
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_get_supported_ruleset_version(void);
+
+/*
+******************************
+* Parameter
+******************************
+*/
+
+/*
+ * Value type of parameter.
+ */
+AWS_SDKUTILS_API enum aws_endpoints_parameter_type aws_endpoints_parameter_get_type(
+ const struct aws_endpoints_parameter *parameter);
+
+/*
+ * Specifies whether parameter maps to one of SDK built ins (ex. "AWS::Region").
+ * Return is a cursor specifying the name of associated built in.
+ * If there is no mapping, cursor will be empty.
+ * Cursor is guaranteed to be valid for lifetime of parameter.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_parameter_get_built_in(
+ const struct aws_endpoints_parameter *parameter);
+
+/*
+ * Default string value.
+ * out_cursor will point to default string value if one exists and will be empty
+ * otherwise.
+ * Cursor is guaranteed to be valid for lifetime of parameter.
+ * Returns AWS_OP_ERR if parameter is not a string.
+ */
+AWS_SDKUTILS_API int aws_endpoints_parameter_get_default_string(
+ const struct aws_endpoints_parameter *parameter,
+ struct aws_byte_cursor *out_cursor);
+
+/*
+ * Default boolean value.
+ * out_bool will have pointer to value if default is specified, NULL otherwise.
+ * Owned by parameter.
+ * Returns AWS_OP_ERR if parameter is not a boolean.
+ */
+AWS_SDKUTILS_API int aws_endpoints_parameter_get_default_boolean(
+ const struct aws_endpoints_parameter *parameter,
+ const bool **out_bool);
+
+/*
+ * Whether parameter is required.
+ */
+AWS_SDKUTILS_API bool aws_endpoints_parameter_get_is_required(const struct aws_endpoints_parameter *parameter);
+
+/*
+ * Returns cursor to parameter documentation.
+ * Cursor is guaranteed to be valid for lifetime of parameter.
+ * Will not be empty as doc is required.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_parameter_get_documentation(
+ const struct aws_endpoints_parameter *parameter);
+
+/*
+ * Whether parameter is deprecated.
+ */
+AWS_SDKUTILS_API bool aws_endpoints_parameters_get_is_deprecated(const struct aws_endpoints_parameter *parameter);
+
+/*
+ * Deprecation message. Cursor is empty if parameter is not deprecated.
+ * Cursor is guaranteed to be valid for lifetime of parameter.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_parameter_get_deprecated_message(
+ const struct aws_endpoints_parameter *parameter);
+
+/*
+ * Deprecated since. Cursor is empty if parameter is not deprecated.
+ * Cursor is guaranteed to be valid for lifetime of parameter.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_parameter_get_deprecated_since(
+ const struct aws_endpoints_parameter *parameter);
+
+/*
+******************************
+* Ruleset
+******************************
+*/
+
+/*
+ * Create new ruleset from a json string.
+ * In cases of failure NULL is returned and last error is set.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_ruleset *aws_endpoints_ruleset_new_from_string(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor ruleset_json);
+
+/*
+ * Increment ref count
+ */
+AWS_SDKUTILS_API struct aws_endpoints_ruleset *aws_endpoints_ruleset_acquire(struct aws_endpoints_ruleset *ruleset);
+
+/*
+ * Decrement ref count
+ */
+AWS_SDKUTILS_API struct aws_endpoints_ruleset *aws_endpoints_ruleset_release(struct aws_endpoints_ruleset *ruleset);
+
+/*
+ * Get ruleset parameters.
+ * Return is a hashtable with parameter name as a key (aws_byte_cursor *) and parameter
+ * (aws_endpoints_parameter *) as a value. Ruleset owns the hashtable and
+ * pointer is valid during ruleset lifetime. Will never return a NULL. In case
+ * there are no parameters in the ruleset, hash table will contain 0 elements.
+ *
+ * Note on usage in bindings:
+ * - this is basically a map from a parameter name to a structure describing parameter
+ * - deep copy all the fields and let language take ownership of data
+ * Consider transforming this into language specific map (dict for python, Map
+ * in Java, std::map in C++, etc...) instead of wrapping it into a custom class.
+ */
+AWS_SDKUTILS_API const struct aws_hash_table *aws_endpoints_ruleset_get_parameters(
+ struct aws_endpoints_ruleset *ruleset);
+
+/*
+ * Ruleset version.
+ * Returned pointer is owned by ruleset.
+ * Will not return NULL as version is a required field for ruleset.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_ruleset_get_version(const struct aws_endpoints_ruleset *ruleset);
+
+/*
+ * Ruleset service id.
+ * Returned pointer is owned by ruleset.
+ * Can be NULL if not specified in ruleset.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_ruleset_get_service_id(
+ const struct aws_endpoints_ruleset *ruleset);
+
+/*
+******************************
+* Rule engine
+******************************
+*/
+
+/**
+ * Create new rule engine for a given ruleset.
+ * In cases of failure NULL is returned and last error is set.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_new(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_ruleset *ruleset,
+ struct aws_partitions_config *partitions_config);
+
+/*
+ * Increment rule engine ref count.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_acquire(
+ struct aws_endpoints_rule_engine *rule_engine);
+
+/*
+ * Decrement rule engine ref count.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_release(
+ struct aws_endpoints_rule_engine *rule_engine);
+
+/*
+ * Creates new request context.
+ * This is basically a property bag containing all request parameter values needed to
+ * resolve endpoint. Parameter value names must match parameter names specified
+ * in ruleset.
+ * Caller is responsible for releasing request context.
+ * Note on usage in bindings:
+ * - Consider exposing it as a custom property bag or a standard map and then
+ * transform it into request context.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_request_context *aws_endpoints_request_context_new(
+ struct aws_allocator *allocator);
+
+/*
+ * Increment request context ref count.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_request_context *aws_endpoints_request_context_acquire(
+ struct aws_endpoints_request_context *request_context);
+
+/*
+ * Decrement request context ref count.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_request_context *aws_endpoints_request_context_release(
+ struct aws_endpoints_request_context *request_context);
+
+/*
+ * Add string value to request context.
+ * Note: this function will make a copy of the memory backing the cursors.
+ * The function will override any previous value stored in the context with the
+ * same name.
+ */
+AWS_SDKUTILS_API int aws_endpoints_request_context_add_string(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_request_context *context,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value);
+
+/*
+ * Add boolean value to request context.
+ * Note: this function will make a copy of the memory backing the cursors.
+ * The function will override any previous value stored in the context with the
+ * same name.
+ */
+AWS_SDKUTILS_API int aws_endpoints_request_context_add_boolean(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_request_context *context,
+ struct aws_byte_cursor name,
+ bool value);
+
+/*
+ * Resolve an endpoint given request context.
+ * Resolved endpoint is returned through out_resolved_endpoint.
+ * In cases of error out_resolved_endpoint is set to NULL and error is returned.
+ * Resolved endpoint is ref counted and caller is responsible for releasing it.
+ */
+AWS_SDKUTILS_API int aws_endpoints_rule_engine_resolve(
+ struct aws_endpoints_rule_engine *engine,
+ const struct aws_endpoints_request_context *context,
+ struct aws_endpoints_resolved_endpoint **out_resolved_endpoint);
+
+/*
+ * Increment resolved endpoint ref count.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_resolved_endpoint *aws_endpoints_resolved_endpoint_acquire(
+ struct aws_endpoints_resolved_endpoint *resolved_endpoint);
+
+/*
+ * Decrement resolved endpoint ref count.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_resolved_endpoint *aws_endpoints_resolved_endpoint_release(
+ struct aws_endpoints_resolved_endpoint *resolved_endpoint);
+
+/*
+ * Get type of resolved endpoint.
+ */
+AWS_SDKUTILS_API enum aws_endpoints_resolved_endpoint_type aws_endpoints_resolved_endpoint_get_type(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint);
+
+/*
+ * Get url for the resolved endpoint.
+ * Valid only if resolved endpoint has endpoint type and will error otherwise.
+ */
+AWS_SDKUTILS_API int aws_endpoints_resolved_endpoint_get_url(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ struct aws_byte_cursor *out_url);
+
+/*
+ * Get properties for the resolved endpoint.
+ * Note: properties is a json string containing additional data for a given
+ * endpoint. Data is not typed and is not guaranteed to remain unchanged in the future.
+ * For use at callers discretion.
+ * Valid only if resolved endpoint has endpoint type and will error otherwise.
+ */
+AWS_SDKUTILS_API int aws_endpoints_resolved_endpoint_get_properties(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ struct aws_byte_cursor *out_properties);
+
+/*
+ * Get headers for the resolved endpoint.
+ * out_headers type is aws_hash_table with (aws_string *) as key
+ * and (aws_array_list * of aws_string *) as value.
+ * Note on usage in bindings:
+ * - this is a map to a list of strings and can be implemented as such in the
+ * target language with deep copy of all underlying strings.
+ * Valid only if resolved endpoint has endpoint type and will error otherwise.
+ */
+AWS_SDKUTILS_API int aws_endpoints_resolved_endpoint_get_headers(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ const struct aws_hash_table **out_headers);
+
+/*
+ * Get error for the resolved endpoint.
+ * Valid only if resolved endpoint has error type and will error otherwise.
+ */
+AWS_SDKUTILS_API int aws_endpoints_resolved_endpoint_get_error(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ struct aws_byte_cursor *out_error);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_SDKUTILS_ENDPOINTS_RULESET_H */
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/exports.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/exports.h
new file mode 100644
index 00000000000..6571706e6fe
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/exports.h
@@ -0,0 +1,30 @@
+#ifndef AWS_SDKUTILS_EXPORTS_H
+#define AWS_SDKUTILS_EXPORTS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32)
+# ifdef AWS_SDKUTILS_USE_IMPORT_EXPORT
+# ifdef AWS_SDKUTILS_EXPORTS
+# define AWS_SDKUTILS_API __declspec(dllexport)
+# else
+# define AWS_SDKUTILS_API __declspec(dllimport)
+# endif /* AWS_SDKUTILS_EXPORTS */
+# else
+# define AWS_SDKUTILS_API
+# endif /*USE_IMPORT_EXPORT */
+
+#else
+# if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_SDKUTILS_USE_IMPORT_EXPORT) && \
+ defined(AWS_SDKUTILS_EXPORTS)
+# define AWS_SDKUTILS_API __attribute__((visibility("default")))
+# else
+# define AWS_SDKUTILS_API
+# endif /* __GNUC__ >= 4 || defined(__clang__) */
+
+#endif /* defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) */
+
+#endif /* AWS_SDKUTILS_EXPORTS_H */
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/partitions.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/partitions.h
new file mode 100644
index 00000000000..bcbd96589c3
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/partitions.h
@@ -0,0 +1,38 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_SDKUTILS_PARTITIONS_H
+#define AWS_SDKUTILS_PARTITIONS_H
+
+#include <aws/common/byte_buf.h>
+#include <aws/sdkutils/sdkutils.h>
+
+struct aws_partitions_config;
+
+AWS_EXTERN_C_BEGIN
+
+AWS_SDKUTILS_API struct aws_byte_cursor aws_partitions_get_supported_version(void);
+
+/*
+ * Create new partitions config from a json string.
+ * In cases of failure NULL is returned and last error is set.
+ */
+AWS_SDKUTILS_API struct aws_partitions_config *aws_partitions_config_new_from_string(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor json);
+
+/*
+ * Increment ref count
+ */
+AWS_SDKUTILS_API struct aws_partitions_config *aws_partitions_config_acquire(struct aws_partitions_config *partitions);
+
+/*
+ * Decrement ref count
+ */
+AWS_SDKUTILS_API struct aws_partitions_config *aws_partitions_config_release(struct aws_partitions_config *partitions);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_SDKUTILS_PARTITIONS_H */
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_types_impl.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_types_impl.h
new file mode 100644
index 00000000000..d4d0823c961
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_types_impl.h
@@ -0,0 +1,314 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_SDKUTILS_ENDPOINTS_RULESET_TYPES_IMPL_H
+#define AWS_SDKUTILS_ENDPOINTS_RULESET_TYPES_IMPL_H
+
+#include <aws/common/hash_table.h>
+#include <aws/common/ref_count.h>
+#include <aws/sdkutils/endpoints_rule_engine.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+
+struct aws_json_value;
+
+/*
+ * Rule engine is built around 2 major types:
+ * - expr - can be a literal, like bool or number or expression like function or ref
+ * - value - literal types only. result of resolving expr. Can have special None
+ * value depending on how expr is resolved. Ex. accessing array past bounds or
+ * substrings with invalid start/end combination will both result in null.
+ *
+ * There is a lot of overlap between expr and value, so why do we need both?
+ * Primary reason is to create a clean boundary between ruleset and resolved
+ * values as it allows to distinguish easily between things that need to be
+ * resolved and things that have been lowered. Given this type system, rule
+ * engine basically performs a task of transforming exprs into values to get
+ * final result.
+ *
+ * Other important types:
+ * Parameter - definition of values that can be provided to rule engine during
+ * resolution. Can define default values if caller didn't provide a value for
+ * parameter.
+ * Request Context - set of parameter value defined for a particular request that
+ * are used during resolution
+ * Scope - set of values defined during resolution of a rule. Can grow/shrink as
+ * rules are evaluated. Ex. scope can have value with name "Region" and value "us-west-2".
+ */
+
+/*
+******************************
+* Parse types.
+******************************
+*/
+
+enum aws_endpoints_rule_type { AWS_ENDPOINTS_RULE_ENDPOINT, AWS_ENDPOINTS_RULE_ERROR, AWS_ENDPOINTS_RULE_TREE };
+
+enum aws_endpoints_expr_type {
+ AWS_ENDPOINTS_EXPR_STRING,
+ AWS_ENDPOINTS_EXPR_NUMBER,
+ AWS_ENDPOINTS_EXPR_BOOLEAN,
+ AWS_ENDPOINTS_EXPR_ARRAY,
+ AWS_ENDPOINTS_EXPR_REFERENCE,
+ AWS_ENDPOINTS_EXPR_FUNCTION
+};
+
+enum aws_endpoints_fn_type {
+ AWS_ENDPOINTS_FN_FIRST = 0,
+ AWS_ENDPOINTS_FN_IS_SET = 0,
+ AWS_ENDPOINTS_FN_NOT,
+ AWS_ENDPOINTS_FN_GET_ATTR,
+ AWS_ENDPOINTS_FN_SUBSTRING,
+ AWS_ENDPOINTS_FN_STRING_EQUALS,
+ AWS_ENDPOINTS_FN_BOOLEAN_EQUALS,
+ AWS_ENDPOINTS_FN_URI_ENCODE,
+ AWS_ENDPOINTS_FN_PARSE_URL,
+ AWS_ENDPOINTS_FN_IS_VALID_HOST_LABEL,
+ AWS_ENDPOINTS_FN_AWS_PARTITION,
+ AWS_ENDPOINTS_FN_AWS_PARSE_ARN,
+ AWS_ENDPOINTS_FN_AWS_IS_VIRTUAL_HOSTABLE_S3_BUCKET,
+ AWS_ENDPOINTS_FN_LAST,
+};
+
+struct aws_endpoints_parameter {
+ struct aws_allocator *allocator;
+
+ struct aws_byte_cursor name;
+
+ enum aws_endpoints_parameter_type type;
+ struct aws_byte_cursor built_in;
+
+ bool has_default_value;
+ union {
+ struct aws_byte_cursor string;
+ bool boolean;
+ } default_value;
+
+ bool is_required;
+ struct aws_byte_cursor documentation;
+ bool is_deprecated;
+ struct aws_byte_cursor deprecated_message;
+ struct aws_byte_cursor deprecated_since;
+};
+
+struct aws_endpoints_ruleset {
+ struct aws_allocator *allocator;
+ struct aws_ref_count ref_count;
+
+ struct aws_json_value *json_root;
+
+ /* list of (aws_endpoints_rule) */
+ struct aws_array_list rules;
+
+ struct aws_byte_cursor version;
+ struct aws_byte_cursor service_id;
+ /* map of (aws_byte_cursor *) -> (aws_endpoints_parameter *) */
+ struct aws_hash_table parameters;
+};
+
+struct aws_endpoints_function {
+ enum aws_endpoints_fn_type fn;
+ /* List of (aws_endpoints_expr) */
+ struct aws_array_list argv;
+};
+
+struct aws_endpoints_expr {
+ enum aws_endpoints_expr_type type;
+ union {
+ struct aws_byte_cursor string;
+ double number;
+ bool boolean;
+ struct aws_array_list array; /* List of (aws_endpoints_expr) */
+ struct aws_byte_cursor reference;
+ struct aws_endpoints_function function;
+ } e;
+};
+
+struct aws_endpoints_rule_data_endpoint {
+ struct aws_allocator *allocator;
+ struct aws_endpoints_expr url;
+
+ /*
+ * Note: this is a custom properties json associated with the result.
+ * Properties are unstable and format can change frequently.
+ * It's up to the caller to parse json to retrieve properties.
+ */
+ struct aws_byte_buf properties;
+ /* Map of (aws_string *) -> (aws_array_list * of aws_endpoints_expr) */
+ struct aws_hash_table headers;
+};
+
+struct aws_endpoints_rule_data_error {
+ struct aws_endpoints_expr error;
+};
+
+struct aws_endpoints_rule_data_tree {
+ /* List of (aws_endpoints_rule) */
+ struct aws_array_list rules;
+};
+
+struct aws_endpoints_condition {
+ struct aws_endpoints_expr expr;
+ struct aws_byte_cursor assign;
+};
+
+struct aws_endpoints_rule {
+ /* List of (aws_endpoints_condition) */
+ struct aws_array_list conditions;
+ struct aws_byte_cursor documentation;
+
+ enum aws_endpoints_rule_type type;
+ union {
+ struct aws_endpoints_rule_data_endpoint endpoint;
+ struct aws_endpoints_rule_data_error error;
+ struct aws_endpoints_rule_data_tree tree;
+ } rule_data;
+};
+
+struct aws_partition_info {
+ struct aws_allocator *allocator;
+ struct aws_byte_cursor name;
+
+ bool is_copy;
+ struct aws_string *info;
+};
+
+struct aws_partitions_config {
+ struct aws_allocator *allocator;
+ struct aws_ref_count ref_count;
+
+ struct aws_json_value *json_root;
+
+ /* map of (byte_cur -> aws_partition_info) */
+ struct aws_hash_table region_to_partition_info;
+
+ struct aws_string *version;
+};
+
+/*
+******************************
+* Eval types.
+******************************
+*/
+
+enum aws_endpoints_value_type {
+ /* Special value to represent that any value type is expected from resolving an expression.
+ Not a valid value for a value type. */
+ AWS_ENDPOINTS_VALUE_ANY,
+
+ AWS_ENDPOINTS_VALUE_NONE,
+ AWS_ENDPOINTS_VALUE_STRING,
+ AWS_ENDPOINTS_VALUE_BOOLEAN,
+ AWS_ENDPOINTS_VALUE_OBJECT, /* Generic type returned by some functions. json string under the covers. */
+ AWS_ENDPOINTS_VALUE_NUMBER,
+ AWS_ENDPOINTS_VALUE_ARRAY,
+
+ AWS_ENDPOINTS_VALUE_SIZE
+};
+
+struct aws_endpoints_request_context {
+ struct aws_allocator *allocator;
+ struct aws_ref_count ref_count;
+
+ struct aws_hash_table values;
+};
+
+/* concrete type value */
+struct aws_endpoints_value {
+ enum aws_endpoints_value_type type;
+ union {
+ struct aws_owning_cursor owning_cursor_string;
+ bool boolean;
+ struct aws_owning_cursor owning_cursor_object;
+ double number;
+ struct aws_array_list array;
+ } v;
+};
+
+/* wrapper around aws_endpoints_value to store it more easily in hash table*/
+struct aws_endpoints_scope_value {
+ struct aws_allocator *allocator;
+
+ struct aws_owning_cursor name;
+
+ struct aws_endpoints_value value;
+};
+
+struct aws_endpoints_resolution_scope {
+ /* current values in scope. byte_cur -> aws_endpoints_scope_value */
+ struct aws_hash_table values;
+ /* list of value keys added since last cleanup */
+ struct aws_array_list added_keys;
+
+ /* index of the rule currently being evaluated */
+ size_t rule_idx;
+ /* pointer to rules array */
+ const struct aws_array_list *rules;
+
+ const struct aws_partitions_config *partitions;
+};
+
+struct aws_partition_info *aws_partition_info_new(struct aws_allocator *allocator, struct aws_byte_cursor name);
+void aws_partition_info_destroy(struct aws_partition_info *partition_info);
+
+struct aws_endpoints_parameter *aws_endpoints_parameter_new(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor name);
+void aws_endpoints_parameter_destroy(struct aws_endpoints_parameter *parameter);
+
+void aws_endpoints_rule_clean_up(struct aws_endpoints_rule *rule);
+
+void aws_endpoints_rule_data_endpoint_clean_up(struct aws_endpoints_rule_data_endpoint *rule_data);
+void aws_endpoints_rule_data_error_clean_up(struct aws_endpoints_rule_data_error *rule_data);
+void aws_endpoints_rule_data_tree_clean_up(struct aws_endpoints_rule_data_tree *rule_data);
+
+void aws_endpoints_condition_clean_up(struct aws_endpoints_condition *condition);
+void aws_endpoints_function_clean_up(struct aws_endpoints_function *function);
+void aws_endpoints_expr_clean_up(struct aws_endpoints_expr *expr);
+
+struct aws_endpoints_scope_value *aws_endpoints_scope_value_new(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor name_cur);
+void aws_endpoints_scope_value_destroy(struct aws_endpoints_scope_value *scope_value);
+
+int aws_endpoints_deep_copy_parameter_value(
+ struct aws_allocator *allocator,
+ const struct aws_endpoints_value *from,
+ struct aws_endpoints_value *to);
+
+void aws_endpoints_value_clean_up(struct aws_endpoints_value *aws_endpoints_value);
+
+/* Helper to resolve argv. Implemented in rule engine. */
+int aws_endpoints_argv_expect(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_array_list *argv,
+ size_t idx,
+ enum aws_endpoints_value_type expected_type,
+ struct aws_endpoints_value *out_value);
+
+extern uint64_t aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_LAST];
+void aws_endpoints_rule_engine_init(void);
+
+int aws_endpoints_dispatch_standard_lib_fn_resolve(
+ enum aws_endpoints_fn_type type,
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value);
+
+int aws_endpoints_path_through_array(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *eval_val,
+ struct aws_byte_cursor path_cur,
+ struct aws_endpoints_value *out_value);
+
+int aws_endpoints_path_through_object(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_value *eval_val,
+ struct aws_byte_cursor path_cur,
+ struct aws_endpoints_value *out_value);
+
+#endif /* AWS_SDKUTILS_ENDPOINTS_RULESET_TYPES_IMPL_H */
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_util.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_util.h
new file mode 100644
index 00000000000..29a4f489769
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_util.h
@@ -0,0 +1,136 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_SDKUTILS_ENDPOINTS_EVAL_UTIL_H
+#define AWS_SDKUTILS_ENDPOINTS_EVAL_UTIL_H
+
+#include <aws/sdkutils/sdkutils.h>
+
+struct aws_string;
+struct aws_byte_buf;
+struct aws_json_value;
+
+/* Cursor that optionally owns underlying memory. */
+struct aws_owning_cursor {
+ struct aws_byte_cursor cur;
+ struct aws_string *string;
+};
+
+/* Clones string and wraps it in owning cursor. */
+AWS_SDKUTILS_API struct aws_owning_cursor aws_endpoints_owning_cursor_create(
+ struct aws_allocator *allocator,
+ const struct aws_string *str);
+/* Creates new cursor that takes ownership of created string. */
+AWS_SDKUTILS_API struct aws_owning_cursor aws_endpoints_owning_cursor_from_string(struct aws_string *str);
+/* Clones the memory pointed to by the cursor and wraps it in an owning cursor */
+AWS_SDKUTILS_API struct aws_owning_cursor aws_endpoints_owning_cursor_from_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor cur);
+/* Creates owning cursor with memory pointer set to NULL */
+AWS_SDKUTILS_API struct aws_owning_cursor aws_endpoints_non_owning_cursor_create(struct aws_byte_cursor cur);
+
+/* Cleans up memory associated with the cursor */
+AWS_SDKUTILS_API void aws_owning_cursor_clean_up(struct aws_owning_cursor *cursor);
+
+/*
+ * Determine whether host cursor is IPv4 string.
+ */
+AWS_SDKUTILS_API bool aws_is_ipv4(struct aws_byte_cursor host);
+
+/*
+ * Determine whether host cursor is IPv6 string.
+ * Supports checking for uri encoded strings and scoped literals.
+ */
+AWS_SDKUTILS_API bool aws_is_ipv6(struct aws_byte_cursor host, bool is_uri_encoded);
+
+/*
+ * Determine whether label is a valid host label.
+ */
+AWS_SDKUTILS_API bool aws_is_valid_host_label(struct aws_byte_cursor label, bool allow_subdomains);
+
+/*
+ * Determines partition from region name.
+ * Note: this basically implements regex-less alternative to regexes specified in
+ * partitions file.
+ * Returns cursor indicating which partition region maps to or empty cursor if
+ * region cannot be mapped.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_map_region_to_partition(struct aws_byte_cursor region);
+
+/*
+ * Normalize uri path - make sure it starts and ends with /
+ * Will initialize out_normalized_path.
+ * In cases of error out_normalized_path will be uninitialized.
+ */
+AWS_SDKUTILS_API int aws_byte_buf_init_from_normalized_uri_path(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor path,
+ struct aws_byte_buf *out_normalized_path);
+
+/*
+ * Creates new string from json value.
+ * NULL in cases of error.
+ */
+AWS_SDKUTILS_API struct aws_string *aws_string_new_from_json(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *value);
+
+/*
+ * Convenience helper for comparing byte cursors.
+ * Typeless for use with hash tables.
+ */
+AWS_SDKUTILS_API bool aws_endpoints_byte_cursor_eq(const void *a, const void *b);
+
+/*
+ * Helpers to do deep clean up of array list.
+ * TODO: move to aws-c-common?
+ */
+typedef void(aws_array_callback_clean_up_fn)(void *value);
+AWS_SDKUTILS_API void aws_array_list_deep_clean_up(
+ struct aws_array_list *array,
+ aws_array_callback_clean_up_fn on_clean_up_element);
+
+/* Function that resolves template. */
+typedef int(aws_endpoints_template_resolve_fn)(
+ struct aws_byte_cursor template,
+ void *user_data,
+ struct aws_owning_cursor *out_resolved);
+/*
+ * Resolve templated string and write it out to buf.
+ * Will parse templated values (i.e. values enclosed in {}) and replace them with
+ * the value returned from resolve_callback.
+ * Note: callback must be able to support syntax for pathing through value (path
+ * provided after #).
+ * Will replace escaped template delimiters ({{ and }}) with single chars.
+ * Supports replacing templated values inside json strings (controlled by
+ * is_json), by ignoring json { and } chars.
+ */
+AWS_SDKUTILS_API int aws_byte_buf_init_from_resolved_templated_string(
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *out_buf,
+ struct aws_byte_cursor string,
+ aws_endpoints_template_resolve_fn resolve_callback,
+ void *user_data,
+ bool is_json);
+
+/*
+ * Path through json structure and return final json node in out_value.
+ * In cases of error, error is returned and out_value is set to NULL.
+ * Array access out of bounds returns success, but set out_value to NULL (to be
+ * consistent with spec).
+ *
+ * Path is defined as a string of '.' delimited fields names, that can optionally
+ * end with [] to indicate indexing.
+ * Note: only last element can be indexed.
+ * ex. path "a.b.c[5]" results in going through a, b and then c and finally
+ * taking index of 5.
+ */
+AWS_SDKUTILS_API int aws_path_through_json(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *root,
+ struct aws_byte_cursor path,
+ const struct aws_json_value **out_value);
+
+#endif /* AWS_SDKUTILS_ENDPOINTS_EVAL_UTIL_H */
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/resource_name.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/resource_name.h
new file mode 100644
index 00000000000..076a433d5bd
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/resource_name.h
@@ -0,0 +1,44 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#ifndef AWS_SDKUTILS_RESOURCE_NAME_H
+#define AWS_SDKUTILS_RESOURCE_NAME_H
+#pragma once
+
+#include <aws/sdkutils/sdkutils.h>
+
+#include <aws/common/byte_buf.h>
+
+struct aws_resource_name {
+ struct aws_byte_cursor partition;
+ struct aws_byte_cursor service;
+ struct aws_byte_cursor region;
+ struct aws_byte_cursor account_id;
+ struct aws_byte_cursor resource_id;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+   Given an ARN ("Amazon Resource Name") represented as an in-memory byte
+   cursor, parses it into a structure representing its component parts.
+*/
+AWS_SDKUTILS_API
+int aws_resource_name_init_from_cur(struct aws_resource_name *arn, const struct aws_byte_cursor *input);
+
+/**
+ Calculates the space needed to write an ARN to a byte buf
+*/
+AWS_SDKUTILS_API
+int aws_resource_name_length(const struct aws_resource_name *arn, size_t *size);
+
+/**
+ Serializes an ARN structure into the lexical string format
+*/
+AWS_SDKUTILS_API
+int aws_byte_buf_append_resource_name(struct aws_byte_buf *buf, const struct aws_resource_name *arn);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_SDKUTILS_RESOURCE_NAME_H */
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/sdkutils.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/sdkutils.h
new file mode 100644
index 00000000000..51d5da528d3
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/sdkutils.h
@@ -0,0 +1,51 @@
+#ifndef AWS_SDKUTILS_SDKUTILS_H
+#define AWS_SDKUTILS_SDKUTILS_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/common.h>
+#include <aws/common/logging.h>
+
+#include <aws/sdkutils/exports.h>
+
+struct aws_allocator;
+
+#define AWS_C_SDKUTILS_PACKAGE_ID 15
+
+enum aws_sdkutils_errors {
+ AWS_ERROR_SDKUTILS_GENERAL = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_SDKUTILS_PACKAGE_ID),
+ AWS_ERROR_SDKUTILS_PARSE_FATAL,
+ AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE,
+ AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET,
+ AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED,
+ AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED,
+ AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED,
+ AWS_ERROR_SDKUTILS_ENDPOINTS_EMPTY_RULESET,
+ AWS_ERROR_SDKUTILS_ENDPOINTS_RULESET_EXHAUSTED,
+ AWS_ERROR_SDKUTILS_PARTITIONS_UNSUPPORTED,
+ AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED,
+
+ AWS_ERROR_SDKUTILS_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_SDKUTILS_PACKAGE_ID)
+};
+
+enum aws_sdkutils_log_subject {
+ AWS_LS_SDKUTILS_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_SDKUTILS_PACKAGE_ID),
+ AWS_LS_SDKUTILS_PROFILE,
+ AWS_LS_SDKUTILS_ENDPOINTS_PARSING,
+ AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE,
+ AWS_LS_SDKUTILS_ENDPOINTS_GENERAL,
+ AWS_LS_SDKUTILS_PARTITIONS_PARSING,
+
+ AWS_LS_SDKUTILS_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_SDKUTILS_PACKAGE_ID)
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_SDKUTILS_API void aws_sdkutils_library_init(struct aws_allocator *allocator);
+AWS_SDKUTILS_API void aws_sdkutils_library_clean_up(void);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_SDKUTILS_SDKUTILS_H */
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c b/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
new file mode 100644
index 00000000000..3e25536cf71
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
@@ -0,0 +1,1592 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/environment.h>
+#include <aws/common/file.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/logging.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/string.h>
+#include <aws/sdkutils/aws_profile.h>
+
+#define PROPERTIES_TABLE_DEFAULT_SIZE 4
+#define PROFILE_TABLE_DEFAULT_SIZE 5
+
+struct aws_profile_property {
+ struct aws_allocator *allocator;
+ struct aws_string *name;
+ struct aws_string *value;
+ struct aws_hash_table sub_properties;
+ bool is_empty_valued;
+};
+
+struct aws_profile {
+ struct aws_allocator *allocator;
+ struct aws_string *name;
+ struct aws_hash_table properties;
+ bool has_profile_prefix;
+};
+
+struct aws_profile_collection {
+ struct aws_allocator *allocator;
+ enum aws_profile_source_type profile_source;
+ /*
+ * Array of aws_hash_table for each section type.
+ * Each table is a map from section identifier to aws_profile.
+ * key: struct aws_string*
+ * value: struct aws_profile*
+ */
+ struct aws_hash_table sections[AWS_PROFILE_SECTION_TYPE_COUNT];
+ struct aws_ref_count ref_count;
+};
+
+/*
+ * Character-based profile parse helper functions
+ */
+/* True if the byte is the property assignment operator '='. */
+static bool s_is_assignment_operator(uint8_t value) {
+    return (char)value == '=';
+}
+
+/* Inverse of s_is_assignment_operator; used to consume a property key. */
+static bool s_is_not_assignment_operator(uint8_t value) {
+    return !s_is_assignment_operator(value);
+}
+
+/*
+ * True if the byte may appear in an identifier: ASCII alphanumerics
+ * plus '\\', '_' and '-'.
+ */
+static bool s_is_identifier(uint8_t value) {
+    char value_as_char = (char)value;
+
+    if ((value_as_char >= 'A' && value_as_char <= 'Z') || (value_as_char >= 'a' && value_as_char <= 'z') ||
+        (value_as_char >= '0' && value_as_char <= '9') || value_as_char == '\\' || value_as_char == '_' ||
+        value_as_char == '-') {
+        return true;
+    }
+
+    return false;
+}
+
+/* True for tab, newline, carriage return or space. */
+static bool s_is_whitespace(uint8_t value) {
+    char value_as_char = (char)value;
+
+    switch (value_as_char) {
+        case '\t':
+        case '\n':
+        case '\r':
+        case ' ':
+            return true;
+
+        default:
+            return false;
+    }
+}
+
+/* True if the byte starts a comment: '#' or ';'. */
+static bool s_is_comment_token(uint8_t value) {
+    char char_value = (char)value;
+
+    return char_value == '#' || char_value == ';';
+}
+
+/* Inverse of s_is_comment_token; used to consume up to a trailing comment. */
+static bool s_is_not_comment_token(uint8_t value) {
+    return !s_is_comment_token(value);
+}
+
+/* True if the byte opens a profile/section declaration: '['. */
+static bool s_is_profile_start(uint8_t value) {
+    return (char)value == '[';
+}
+
+/* True while still inside a '[...]' declaration (i.e. byte is not ']'). */
+static bool s_is_not_profile_end(uint8_t value) {
+    return (char)value != ']';
+}
+
+/* True if the byte is '\r'; used to strip CR from CRLF line endings. */
+static bool s_is_carriage_return(uint8_t value) {
+    return (char)value == '\r';
+}
+
+/*
+ * Line and string based parse helper functions
+ */
+/* True if the line's first character is a comment token ('#' or ';').
+ * NOTE(review): dereferences line_cursor->ptr without checking len — assumes
+ * callers never pass an empty line; confirm at call sites. */
+static bool s_is_comment_line(const struct aws_byte_cursor *line_cursor) {
+    char first_char = *line_cursor->ptr;
+    return first_char == '#' || first_char == ';';
+}
+
+/* True if the line contains only whitespace (per s_is_whitespace). */
+static bool s_is_whitespace_line(const struct aws_byte_cursor *line_cursor) {
+    return aws_byte_cursor_left_trim_pred(line_cursor, s_is_whitespace).len == 0;
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_default_profile_name, "default");
+
+/* True if the profile name is exactly "default" (case-sensitive). */
+static bool s_is_default_profile_name(const struct aws_byte_cursor *profile_name) {
+    return aws_string_eq_byte_cursor(s_default_profile_name, profile_name);
+}
+
+/*
+ * Consume helpers
+ */
+
+/*
+ * Consumes characters as long as a predicate is satisfied. "parsed" is optional and contains the consumed range as
+ * output. Returns true if anything was consumed.
+ *
+ * On success, start is updated to the new position.
+ */
+static bool s_parse_by_character_predicate(
+    struct aws_byte_cursor *start,
+    aws_byte_predicate_fn *predicate,
+    struct aws_byte_cursor *parsed,
+    size_t maximum_allowed) {
+
+    uint8_t *current_ptr = start->ptr;
+    uint8_t *end_ptr = start->ptr + start->len;
+    /* maximum_allowed == 0 means "no limit"; otherwise cap the scan window. */
+    if (maximum_allowed > 0 && maximum_allowed < start->len) {
+        end_ptr = start->ptr + maximum_allowed;
+    }
+
+    /* Advance while the predicate holds, up to the (possibly capped) end. */
+    while (current_ptr < end_ptr) {
+        if (!predicate(*current_ptr)) {
+            break;
+        }
+
+        ++current_ptr;
+    }
+
+    /* Report the matched range via the optional out-cursor. */
+    size_t consumed = current_ptr - start->ptr;
+    if (parsed != NULL) {
+        parsed->ptr = start->ptr;
+        parsed->len = consumed;
+    }
+
+    /* Move the input cursor past everything that matched. */
+    aws_byte_cursor_advance(start, consumed);
+
+    return consumed > 0;
+}
+
+/*
+ * Consumes characters if they match a token string. "parsed" is optional and contains the consumed range as output.
+ * Returns true if anything was consumed.
+ *
+ * On success, start is updated to the new position.
+ */
+static bool s_parse_by_token(
+    struct aws_byte_cursor *start,
+    const struct aws_string *token,
+    struct aws_byte_cursor *parsed) {
+
+    bool matched = false;
+
+    /* Only compare if the remaining input is at least as long as the token;
+     * strncmp bounds the comparison so the cursor need not be NUL-terminated. */
+    if (token->len <= start->len) {
+        matched = strncmp((const char *)start->ptr, aws_string_c_str(token), token->len) == 0;
+    }
+
+    /* The out-cursor always points at the match position; len is 0 on a miss. */
+    if (parsed != NULL) {
+        parsed->ptr = start->ptr;
+        parsed->len = matched ? token->len : 0;
+    }
+
+    if (matched) {
+        aws_byte_cursor_advance(start, token->len);
+    }
+
+    return matched;
+}
+
+/*
+ * Parse context and logging
+ */
+
+/* Mutable state threaded through the line-by-line profile file parse. */
+struct profile_file_parse_context {
+    /* Path of the file being parsed; may be NULL (see s_log_parse_context). */
+    const struct aws_string *source_file_path;
+    /* Collection that receives parsed sections and properties. */
+    struct aws_profile_collection *profile_collection;
+    /* Profile the parser is currently inside; NULL before any declaration. */
+    struct aws_profile *current_profile;
+    /* Most recently parsed property (target for continuation lines). */
+    struct aws_profile_property *current_property;
+    /* Cursor over the line currently being parsed. */
+    struct aws_byte_cursor current_line;
+    /* Parse error code recorded during the scan, if any. */
+    int parse_error;
+    /* Line counter used for diagnostics. */
+    int current_line_number;
+    /* True once any profile declaration has been seen. */
+    bool has_seen_profile;
+};
+
+AWS_STATIC_STRING_FROM_LITERAL(s_none_string, "<None>");
+
+/* Logs the current parse position (file, line, profile, property) at the
+ * given level; NULL fields are rendered as "<None>". */
+static void s_log_parse_context(enum aws_log_level log_level, const struct profile_file_parse_context *context) {
+    AWS_LOGF(
+        log_level,
+        AWS_LS_SDKUTILS_PROFILE,
+        "Profile Parse context:\n Source File:%s\n Line: %d\n Current Profile: %s\n Current Property: %s",
+        context->source_file_path ? context->source_file_path->bytes : s_none_string->bytes,
+        context->current_line_number,
+        context->current_profile ? context->current_profile->name->bytes : s_none_string->bytes,
+        context->current_property ? context->current_property->name->bytes : s_none_string->bytes);
+}
+
+/*
+ * aws_profile_property APIs
+ */
+
+/* Frees a property, its name/value strings and its sub-property table.
+ * Safe on NULL and on partially-constructed properties (fields are zeroed
+ * before init in aws_profile_property_new). */
+static void s_profile_property_destroy(struct aws_profile_property *property) {
+    if (property == NULL) {
+        return;
+    }
+
+    aws_string_destroy(property->name);
+    aws_string_destroy(property->value);
+
+    aws_hash_table_clean_up(&property->sub_properties);
+
+    aws_mem_release(property->allocator, property);
+}
+
+/* Allocates a new property, copying name and value out of the cursors.
+ * Returns NULL on allocation failure. The sub_properties table owns both
+ * its keys and values (string destroy callbacks). */
+struct aws_profile_property *aws_profile_property_new(
+    struct aws_allocator *allocator,
+    const struct aws_byte_cursor *name,
+    const struct aws_byte_cursor *value) {
+
+    struct aws_profile_property *property =
+        (struct aws_profile_property *)aws_mem_acquire(allocator, sizeof(struct aws_profile_property));
+    if (property == NULL) {
+        return NULL;
+    }
+
+    /* Zero first so the error path can safely destroy a partial object. */
+    AWS_ZERO_STRUCT(*property);
+    property->allocator = allocator;
+
+    if (aws_hash_table_init(
+            &property->sub_properties,
+            allocator,
+            0,
+            aws_hash_string,
+            aws_hash_callback_string_eq,
+            aws_hash_callback_string_destroy,
+            aws_hash_callback_string_destroy)) {
+        goto on_error;
+    }
+
+    property->value = aws_string_new_from_array(allocator, value->ptr, value->len);
+    if (property->value == NULL) {
+        goto on_error;
+    }
+
+    property->name = aws_string_new_from_array(allocator, name->ptr, name->len);
+    if (property->name == NULL) {
+        goto on_error;
+    }
+
+    /* Distinguishes "key =" (empty-valued) from a later-merged value. */
+    property->is_empty_valued = value->len == 0;
+
+    return property;
+
+on_error:
+    s_profile_property_destroy(property);
+
+    return NULL;
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_newline, "\n");
+
+/*
+ * Continuations are applied to the property value by concatenating the old value and the new value, with a '\n'
+ * in between.
+ *
+ * Returns AWS_OP_SUCCESS/AWS_OP_ERR; on failure the existing value is left
+ * unchanged.
+ */
+static int s_profile_property_add_continuation(
+    struct aws_profile_property *property,
+    const struct aws_byte_cursor *continuation_value) {
+
+    int result = AWS_OP_ERR;
+    struct aws_byte_buf concatenation;
+    /* +1 for the '\n' separator between old value and continuation. */
+    if (aws_byte_buf_init(&concatenation, property->allocator, property->value->len + continuation_value->len + 1)) {
+        return result;
+    }
+
+    struct aws_byte_cursor old_value = aws_byte_cursor_from_string(property->value);
+    if (aws_byte_buf_append(&concatenation, &old_value)) {
+        goto on_generic_failure;
+    }
+
+    struct aws_byte_cursor newline = aws_byte_cursor_from_string(s_newline);
+    if (aws_byte_buf_append(&concatenation, &newline)) {
+        goto on_generic_failure;
+    }
+
+    if (aws_byte_buf_append(&concatenation, continuation_value)) {
+        goto on_generic_failure;
+    }
+
+    struct aws_string *new_value =
+        aws_string_new_from_array(property->allocator, concatenation.buffer, concatenation.len);
+    if (new_value == NULL) {
+        goto on_generic_failure;
+    }
+
+    /* Swap in the combined value; only now is the old value released. */
+    result = AWS_OP_SUCCESS;
+    aws_string_destroy(property->value);
+    property->value = new_value;
+
+/* Shared cleanup: the success path falls through here intentionally —
+ * the scratch buffer is released in both cases. */
+on_generic_failure:
+    aws_byte_buf_clean_up(&concatenation);
+
+    return result;
+}
+
+/* Adds (or overwrites, with a warning) a key/value sub-property on a
+ * property. Copies both cursors; ownership of the copies passes to the
+ * sub_properties table on success. Returns AWS_OP_SUCCESS/AWS_OP_ERR. */
+static int s_profile_property_add_sub_property(
+    struct aws_profile_property *property,
+    const struct aws_byte_cursor *key,
+    const struct aws_byte_cursor *value,
+    const struct profile_file_parse_context *context) {
+
+    struct aws_string *key_string = aws_string_new_from_array(property->allocator, key->ptr, key->len);
+    if (key_string == NULL) {
+        return AWS_OP_ERR;
+    }
+
+    struct aws_string *value_string = aws_string_new_from_array(property->allocator, value->ptr, value->len);
+    if (value_string == NULL) {
+        goto on_failure;
+    }
+
+    /* Remove any previous entry first so the table's destroy callbacks free
+     * the old key/value pair, then warn that the value was overridden. */
+    int was_present = 0;
+    aws_hash_table_remove(&property->sub_properties, key_string, NULL, &was_present);
+    if (was_present) {
+        AWS_LOGF_WARN(
+            AWS_LS_SDKUTILS_PROFILE,
+            "subproperty \"%s\" of property \"%s\" had value overridden with new value",
+            key_string->bytes,
+            property->name->bytes);
+        s_log_parse_context(AWS_LL_WARN, context);
+    }
+
+    if (aws_hash_table_put(&property->sub_properties, key_string, value_string, NULL)) {
+        goto on_failure;
+    }
+
+    return AWS_OP_SUCCESS;
+
+on_failure:
+
+    /* value_string may be NULL when the jump came from its failed allocation. */
+    if (value_string) {
+        aws_string_destroy(value_string);
+    }
+
+    aws_string_destroy(key_string);
+
+    return AWS_OP_ERR;
+}
+
+/* Merges source into dest: source's value replaces dest's (with a warning),
+ * and every source sub-property is deep-copied over, stomping conflicts.
+ * Returns AWS_OP_SUCCESS/AWS_OP_ERR; on error dest may be partially merged. */
+static int s_profile_property_merge(struct aws_profile_property *dest, const struct aws_profile_property *source) {
+
+    AWS_ASSERT(dest != NULL && source != NULL);
+
+    /*
+     * Source value overwrites any existing dest value
+     */
+    if (source->value) {
+        struct aws_string *new_value = aws_string_new_from_string(dest->allocator, source->value);
+        if (new_value == NULL) {
+            return AWS_OP_ERR;
+        }
+
+        if (dest->value) {
+            AWS_LOGF_WARN(
+                AWS_LS_SDKUTILS_PROFILE,
+                "property \"%s\" has value \"%s\" replaced during merge",
+                dest->name->bytes,
+                dest->value->bytes);
+            aws_string_destroy(dest->value);
+        }
+
+        dest->value = new_value;
+    }
+
+    dest->is_empty_valued = source->is_empty_valued;
+
+    /*
+     * Iterate sub properties, stomping on conflicts
+     */
+    struct aws_hash_iter source_iter = aws_hash_iter_begin(&source->sub_properties);
+    while (!aws_hash_iter_done(&source_iter)) {
+        struct aws_string *source_sub_property = (struct aws_string *)source_iter.element.value;
+
+        /* Deep-copy key and value so dest owns independent strings. */
+        struct aws_string *dest_key =
+            aws_string_new_from_string(dest->allocator, (struct aws_string *)source_iter.element.key);
+        if (dest_key == NULL) {
+            return AWS_OP_ERR;
+        }
+
+        struct aws_string *dest_sub_property = aws_string_new_from_string(dest->allocator, source_sub_property);
+        if (dest_sub_property == NULL) {
+            aws_string_destroy(dest_key);
+            return AWS_OP_ERR;
+        }
+
+        /* Remove first so the table's destroy callbacks free the old pair. */
+        int was_present = 0;
+        aws_hash_table_remove(&dest->sub_properties, dest_key, NULL, &was_present);
+        if (was_present) {
+            AWS_LOGF_WARN(
+                AWS_LS_SDKUTILS_PROFILE,
+                "subproperty \"%s\" of property \"%s\" had value overridden during property merge",
+                dest_key->bytes,
+                dest->name->bytes);
+        }
+
+        if (aws_hash_table_put(&dest->sub_properties, dest_key, dest_sub_property, NULL)) {
+            aws_string_destroy(dest_sub_property);
+            aws_string_destroy(dest_key);
+            return AWS_OP_ERR;
+        }
+
+        aws_hash_iter_next(&source_iter);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/*
+ * Helper destroy function for aws_profile's hash table of properties
+ */
+static void s_property_hash_table_value_destroy(void *value) {
+    s_profile_property_destroy((struct aws_profile_property *)value);
+}
+
+/*
+ * aws_profile APIs
+ */
+
+/* Frees a profile, its name and all its properties (via the table's value
+ * destroy callback). Safe on NULL. */
+void aws_profile_destroy(struct aws_profile *profile) {
+    if (profile == NULL) {
+        return;
+    }
+
+    aws_string_destroy(profile->name);
+
+    aws_hash_table_clean_up(&profile->properties);
+
+    aws_mem_release(profile->allocator, profile);
+}
+
+/* Allocates a new, empty profile with a copy of the given name.
+ * Property keys are owned by their property values, so the table has no key
+ * destroy callback. Returns NULL on allocation failure.
+ * NOTE(review): profile->allocator is only assigned after the table init
+ * succeeds; on the earlier failure paths aws_profile_destroy is invoked with
+ * a zeroed allocator field — confirm aws_mem_release tolerates that. */
+struct aws_profile *aws_profile_new(
+    struct aws_allocator *allocator,
+    const struct aws_byte_cursor *name,
+    bool has_profile_prefix) {
+
+    struct aws_profile *profile = (struct aws_profile *)aws_mem_acquire(allocator, sizeof(struct aws_profile));
+    if (profile == NULL) {
+        return NULL;
+    }
+
+    AWS_ZERO_STRUCT(*profile);
+
+    profile->name = aws_string_new_from_array(allocator, name->ptr, name->len);
+    if (profile->name == NULL) {
+        goto cleanup;
+    }
+
+    if (aws_hash_table_init(
+            &profile->properties,
+            allocator,
+            PROPERTIES_TABLE_DEFAULT_SIZE,
+            aws_hash_string,
+            aws_hash_callback_string_eq,
+            NULL, /* The key is owned by the value (and destroy cleans it up), so we don't have to */
+            s_property_hash_table_value_destroy)) {
+
+        goto cleanup;
+    }
+
+    profile->allocator = allocator;
+    profile->has_profile_prefix = has_profile_prefix;
+
+    return profile;
+
+cleanup:
+    aws_profile_destroy(profile);
+
+    return NULL;
+}
+
+/*
+ * Adds a property to a profile.
+ *
+ * If a property already exists then the old one is removed and replaced by the
+ * new one.
+ *
+ * Returns the newly-created property (owned by the profile's table) or NULL
+ * on failure.
+ */
+static struct aws_profile_property *s_profile_add_property(
+    struct aws_profile *profile,
+    const struct aws_byte_cursor *key_cursor,
+    const struct aws_byte_cursor *value_cursor) {
+
+    struct aws_profile_property *property = aws_profile_property_new(profile->allocator, key_cursor, value_cursor);
+    if (property == NULL) {
+        goto on_property_new_failure;
+    }
+
+    /* The property's own name string doubles as the table key. */
+    if (aws_hash_table_put(&profile->properties, property->name, property, NULL)) {
+        goto on_hash_table_put_failure;
+    }
+
+    return property;
+
+on_hash_table_put_failure:
+    s_profile_property_destroy(property);
+
+on_property_new_failure:
+    return NULL;
+}
+
+/* Looks up a property by name; returns NULL when not found. */
+const struct aws_profile_property *aws_profile_get_property(
+    const struct aws_profile *profile,
+    const struct aws_string *property_name) {
+
+    struct aws_hash_element *element = NULL;
+    aws_hash_table_find(&profile->properties, property_name, &element);
+
+    if (element == NULL) {
+        return NULL;
+    }
+
+    return element->value;
+}
+
+/* Accessor: returns the property's value string (owned by the property). */
+const struct aws_string *aws_profile_property_get_value(const struct aws_profile_property *property) {
+    AWS_PRECONDITION(property);
+    return property->value;
+}
+
+/* Merges every property of source_profile into dest_profile, creating
+ * empty-valued placeholder properties in dest as needed and delegating the
+ * per-property merge to s_profile_property_merge.
+ * Returns AWS_OP_SUCCESS/AWS_OP_ERR; on error dest may be partially merged. */
+static int s_profile_merge(struct aws_profile *dest_profile, const struct aws_profile *source_profile) {
+
+    AWS_ASSERT(dest_profile != NULL && source_profile != NULL);
+
+    dest_profile->has_profile_prefix = source_profile->has_profile_prefix;
+
+    struct aws_hash_iter source_iter = aws_hash_iter_begin(&source_profile->properties);
+    while (!aws_hash_iter_done(&source_iter)) {
+        struct aws_profile_property *source_property = (struct aws_profile_property *)source_iter.element.value;
+        struct aws_profile_property *dest_property = (struct aws_profile_property *)aws_profile_get_property(
+            dest_profile, (struct aws_string *)source_iter.element.key);
+        if (dest_property == NULL) {
+
+            /* Not in dest yet: insert an empty placeholder for the merge to fill. */
+            struct aws_byte_cursor empty_value;
+            AWS_ZERO_STRUCT(empty_value);
+
+            struct aws_byte_cursor property_name = aws_byte_cursor_from_string(source_iter.element.key);
+            dest_property = aws_profile_property_new(dest_profile->allocator, &property_name, &empty_value);
+            if (dest_property == NULL) {
+                return AWS_OP_ERR;
+            }
+
+            if (aws_hash_table_put(&dest_profile->properties, dest_property->name, dest_property, NULL)) {
+                s_profile_property_destroy(dest_property);
+                return AWS_OP_ERR;
+            }
+        }
+
+        if (s_profile_property_merge(dest_property, source_property)) {
+            return AWS_OP_ERR;
+        }
+
+        aws_hash_iter_next(&source_iter);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/*
+ * Hash table destroy helper for profile collection's profiles member
+ */
+static void s_profile_hash_table_value_destroy(void *value) {
+    aws_profile_destroy((struct aws_profile *)value);
+}
+
+/*
+ * aws_profile_collection APIs
+ */
+
+/* Deprecated-style destroy: forwards to release (collection is ref-counted). */
+void aws_profile_collection_destroy(struct aws_profile_collection *profile_collection) {
+    aws_profile_collection_release(profile_collection);
+}
+
+/* Actual teardown, invoked when the ref count reaches zero: cleans every
+ * section table (which frees the profiles) and releases the struct. */
+static void s_aws_profile_collection_destroy_internal(struct aws_profile_collection *profile_collection) {
+    for (int i = 0; i < AWS_PROFILE_SECTION_TYPE_COUNT; i++) {
+        aws_hash_table_clean_up(&profile_collection->sections[i]);
+    }
+    aws_mem_release(profile_collection->allocator, profile_collection);
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_profile_token, "profile");
+AWS_STATIC_STRING_FROM_LITERAL(s_sso_session_token, "sso-session");
+
+/* Convenience lookup in the "profile" section table. */
+const struct aws_profile *aws_profile_collection_get_profile(
+    const struct aws_profile_collection *profile_collection,
+    const struct aws_string *profile_name) {
+    return aws_profile_collection_get_section(profile_collection, AWS_PROFILE_SECTION_TYPE_PROFILE, profile_name);
+}
+
+/* Looks up a named section of the given type; returns NULL when not found. */
+const struct aws_profile *aws_profile_collection_get_section(
+    const struct aws_profile_collection *profile_collection,
+    const enum aws_profile_section_type section_type,
+    const struct aws_string *section_name) {
+    struct aws_hash_element *element = NULL;
+    aws_hash_table_find(&profile_collection->sections[section_type], section_name, &element);
+    if (element == NULL) {
+        return NULL;
+    }
+    return element->value;
+}
+
+/* Finds or creates the profile named by profile_name in the given section,
+ * applying the config-file precedence rule between "profile default" and
+ * bare "default". On success *current_profile_out is the profile to parse
+ * into — or NULL when this declaration should be ignored entirely. */
+static int s_profile_collection_add_profile(
+    struct aws_profile_collection *profile_collection,
+    const enum aws_profile_section_type section_type,
+    const struct aws_byte_cursor *profile_name,
+    bool has_prefix,
+    const struct profile_file_parse_context *context,
+    struct aws_profile **current_profile_out) {
+
+    *current_profile_out = NULL;
+    /* Temporary key just for the lookup; destroyed below. */
+    struct aws_string *key =
+        aws_string_new_from_array(profile_collection->allocator, profile_name->ptr, profile_name->len);
+    if (key == NULL) {
+        return AWS_OP_ERR;
+    }
+
+    struct aws_profile *existing_profile = NULL;
+    struct aws_hash_element *element = NULL;
+    aws_hash_table_find(&profile_collection->sections[section_type], key, &element);
+    if (element != NULL) {
+        existing_profile = element->value;
+    }
+
+    aws_string_destroy(key);
+
+    if (section_type == AWS_PROFILE_SECTION_TYPE_PROFILE && profile_collection->profile_source == AWS_PST_CONFIG &&
+        s_is_default_profile_name(profile_name)) {
+        /*
+         * In a config file, "profile default" always supersedes "default"
+         */
+        if (!has_prefix && existing_profile && existing_profile->has_profile_prefix) {
+            /*
+             * existing one supersedes: ignore this (and its properties) completely by failing the add
+             * which sets the current profile to NULL
+             */
+            AWS_LOGF_WARN(
+                AWS_LS_SDKUTILS_PROFILE,
+                "Existing prefixed default config profile supercedes unprefixed default profile");
+            s_log_parse_context(AWS_LL_WARN, context);
+
+            return AWS_OP_SUCCESS;
+        }
+
+        if (has_prefix && existing_profile && !existing_profile->has_profile_prefix) {
+            /*
+             * stomp over existing: remove it, then proceed with add
+             * element destroy function will clean up the profile and key
+             */
+            AWS_LOGF_WARN(
+                AWS_LS_SDKUTILS_PROFILE, "Prefixed default config profile replacing unprefixed default profile");
+            s_log_parse_context(AWS_LL_WARN, context);
+
+            aws_hash_table_remove(&profile_collection->sections[section_type], element->key, NULL, NULL);
+            existing_profile = NULL;
+        }
+    }
+
+    /* Re-opening an existing section: parse additional properties into it. */
+    if (existing_profile) {
+        *current_profile_out = existing_profile;
+        return AWS_OP_SUCCESS;
+    }
+
+    struct aws_profile *new_profile = aws_profile_new(profile_collection->allocator, profile_name, has_prefix);
+    if (new_profile == NULL) {
+        goto on_aws_profile_new_failure;
+    }
+
+    /* The profile's own name string doubles as the table key. */
+    if (aws_hash_table_put(&profile_collection->sections[section_type], new_profile->name, new_profile, NULL)) {
+        goto on_hash_table_put_failure;
+    }
+
+    *current_profile_out = new_profile;
+    return AWS_OP_SUCCESS;
+
+on_hash_table_put_failure:
+    aws_profile_destroy(new_profile);
+
+on_aws_profile_new_failure:
+    return AWS_OP_ERR;
+}
+
+/* Merges every section of source_collection into dest_collection, creating
+ * destination profiles as needed and delegating to s_profile_merge.
+ * Returns AWS_OP_SUCCESS/AWS_OP_ERR; on error dest may be partially merged.
+ * NOTE(review): the lookup below uses aws_profile_collection_get_profile,
+ * which always searches the PROFILE section, even while iterating section
+ * index i — confirm whether non-profile sections (e.g. sso-session) are
+ * intended to be looked up in their own table here. */
+static int s_profile_collection_merge(
+    struct aws_profile_collection *dest_collection,
+    const struct aws_profile_collection *source_collection) {
+
+    AWS_ASSERT(dest_collection != NULL && source_collection);
+    for (int i = 0; i < AWS_PROFILE_SECTION_TYPE_COUNT; i++) {
+        struct aws_hash_iter source_iter = aws_hash_iter_begin(&source_collection->sections[i]);
+        while (!aws_hash_iter_done(&source_iter)) {
+            struct aws_profile *source_profile = (struct aws_profile *)source_iter.element.value;
+            struct aws_profile *dest_profile = (struct aws_profile *)aws_profile_collection_get_profile(
+                dest_collection, (struct aws_string *)source_iter.element.key);
+
+            if (dest_profile == NULL) {
+
+                /* Not in dest yet: create an empty profile for the merge to fill. */
+                struct aws_byte_cursor name_cursor = aws_byte_cursor_from_string(source_iter.element.key);
+                dest_profile =
+                    aws_profile_new(dest_collection->allocator, &name_cursor, source_profile->has_profile_prefix);
+                if (dest_profile == NULL) {
+                    return AWS_OP_ERR;
+                }
+
+                if (aws_hash_table_put(&dest_collection->sections[i], dest_profile->name, dest_profile, NULL)) {
+                    aws_profile_destroy(dest_profile);
+                    return AWS_OP_ERR;
+                }
+            }
+
+            if (s_profile_merge(dest_profile, source_profile)) {
+                return AWS_OP_ERR;
+            }
+
+            aws_hash_iter_next(&source_iter);
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Builds a new ref-counted collection by merging config profiles first, then
+ * credentials profiles on top (credentials values win on conflict). Either
+ * input may be NULL. Returns NULL on failure; partially-built state is torn
+ * down via s_aws_profile_collection_destroy_internal. */
+struct aws_profile_collection *aws_profile_collection_new_from_merge(
+    struct aws_allocator *allocator,
+    const struct aws_profile_collection *config_profiles,
+    const struct aws_profile_collection *credentials_profiles) {
+
+    struct aws_profile_collection *merged =
+        (struct aws_profile_collection *)(aws_mem_acquire(allocator, sizeof(struct aws_profile_collection)));
+    if (merged == NULL) {
+        return NULL;
+    }
+
+    /* Zeroing first makes cleaning up never-initialized section tables safe. */
+    AWS_ZERO_STRUCT(*merged);
+    aws_ref_count_init(
+        &merged->ref_count, merged, (aws_simple_completion_callback *)s_aws_profile_collection_destroy_internal);
+    for (int i = 0; i < AWS_PROFILE_SECTION_TYPE_COUNT; i++) {
+        /* Size each table for the worst case: every profile name distinct. */
+        size_t max_profiles = 0;
+        if (config_profiles != NULL) {
+            max_profiles += aws_hash_table_get_entry_count(&config_profiles->sections[i]);
+        }
+        if (credentials_profiles != NULL) {
+            max_profiles += aws_hash_table_get_entry_count(&credentials_profiles->sections[i]);
+        }
+
+        /* NOTE(review): these two assignments are loop-invariant; executing
+         * them per-iteration is redundant but harmless. */
+        merged->allocator = allocator;
+        merged->profile_source = AWS_PST_NONE;
+
+        if (aws_hash_table_init(
+                &merged->sections[i],
+                allocator,
+                max_profiles,
+                aws_hash_string,
+                aws_hash_callback_string_eq,
+                NULL,
+                s_profile_hash_table_value_destroy)) {
+            goto cleanup;
+        }
+    }
+
+    if (config_profiles != NULL) {
+        if (s_profile_collection_merge(merged, config_profiles)) {
+            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Failed to merge config profile set");
+            goto cleanup;
+        }
+    }
+
+    if (credentials_profiles != NULL) {
+        if (s_profile_collection_merge(merged, credentials_profiles)) {
+            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Failed to merge credentials profile set");
+            goto cleanup;
+        }
+    }
+
+    return merged;
+
+cleanup:
+    s_aws_profile_collection_destroy_internal(merged);
+
+    return NULL;
+}
+
+/*
+ * Profile parsing
+ */
+
+/*
+ * The comment situation in config files is messy. Some line types require a comment to have at least one
+ * whitespace character in front of it, while other line types only require a comment token (';' or '#').
+ * On top of that, some line types do not allow comments at all (they get folded into the value).
+ */
+
+/*
+ * a trailing comment is started by ';' or '#'
+ * Only certain types of lines allow comments without prefixing whitespace
+ */
+static struct aws_byte_cursor s_trim_trailing_comment(const struct aws_byte_cursor *line) {
+    /* Parse against a local copy so the caller's cursor is left untouched. */
+    struct aws_byte_cursor scratch = *line;
+    struct aws_byte_cursor before_comment;
+
+    /* Everything up to (but not including) the first comment token is kept. */
+    s_parse_by_character_predicate(&scratch, s_is_not_comment_token, &before_comment, 0);
+
+    return before_comment;
+}
+
+/*
+ * A trailing whitespace comment is started by " ;", " #", "\t;", or "\t#"
+ * Certain types of lines require comments be whitespace-prefixed
+ */
+static struct aws_byte_cursor s_trim_trailing_whitespace_comment(const struct aws_byte_cursor *line) {
+    const uint8_t *begin = line->ptr;
+    const uint8_t *end = line->ptr + line->len;
+    const uint8_t *scan = begin;
+
+    /*
+     * Stop at the first whitespace character that is immediately followed by a
+     * comment token; everything before it is the retained content.
+     */
+    for (; scan < end; ++scan) {
+        if (s_is_whitespace(*scan) && scan + 1 < end && s_is_comment_token(*(scan + 1))) {
+            break;
+        }
+    }
+
+    struct aws_byte_cursor trimmed = {
+        .ptr = line->ptr,
+        .len = (size_t)(scan - begin),
+    };
+
+    return trimmed;
+}
+
+/**
+ * Attempts to parse profile declaration lines
+ *
+ * Return false if this is not a profile declaration, true otherwise (stop parsing the line)
+ *
+ * When the line is claimed but malformed, the problem is reported through
+ * context->parse_error: AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE skips just this
+ * declaration, while AWS_ERROR_SDKUTILS_PARSE_FATAL aborts the whole file.
+ */
+static bool s_parse_profile_declaration(
+    const struct aws_byte_cursor *line_cursor,
+    struct profile_file_parse_context *context) {
+
+    /*
+     * Strip comment and right-side whitespace
+     */
+    struct aws_byte_cursor profile_line_cursor = s_trim_trailing_comment(line_cursor);
+    struct aws_byte_cursor profile_cursor = aws_byte_cursor_right_trim_pred(&profile_line_cursor, s_is_whitespace);
+
+    /*
+     * "[" + <whitespace>? + <"profile ">? + <profile name = identifier> + <whitespace>? + "]"
+     */
+    if (!s_parse_by_character_predicate(&profile_cursor, s_is_profile_start, NULL, 1)) {
+        /*
+         * This isn't a profile declaration, try something else
+         */
+        return false;
+    }
+
+    /* From here on the line is claimed as a declaration; reset the parse state. */
+    context->has_seen_profile = true;
+    context->current_profile = NULL;
+    context->current_property = NULL;
+
+    s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 0);
+    enum aws_profile_section_type section_type = AWS_PROFILE_SECTION_TYPE_PROFILE;
+
+    /*
+     * Check if the profile name starts with the 'profile' keyword. We need to check for
+     * "profile" and at least one whitespace character. A partial match
+     * ("[profilefoo]" for example) should rewind and use the whole name properly.
+     */
+    struct aws_byte_cursor backtrack_cursor = profile_cursor;
+    bool has_profile_prefix = s_parse_by_token(&profile_cursor, s_profile_token, NULL) &&
+                              s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 1);
+    bool has_sso_session_prefix = !has_profile_prefix && s_parse_by_token(&profile_cursor, s_sso_session_token, NULL) &&
+                                  s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 1);
+
+    if (has_profile_prefix) {
+        /* Credentials files must not use the "profile" keyword. */
+        if (context->profile_collection->profile_source == AWS_PST_CREDENTIALS) {
+            AWS_LOGF_WARN(
+                AWS_LS_SDKUTILS_PROFILE,
+                "Profile declarations in credentials files are not allowed to begin with the \"profile\" keyword");
+            s_log_parse_context(AWS_LL_WARN, context);
+
+            context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+            return true;
+        }
+
+        s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 0);
+    } else if (has_sso_session_prefix) {
+        /* sso-session sections are a config-file-only concept. */
+        if (context->profile_collection->profile_source == AWS_PST_CREDENTIALS) {
+            AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "sso-session declarations in credentials files are not allowed");
+            s_log_parse_context(AWS_LL_WARN, context);
+
+            context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+            return true;
+        }
+        section_type = AWS_PROFILE_SECTION_TYPE_SSO_SESSION;
+        s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 0);
+    } else {
+        /* Partial keyword match ("[profilefoo]"): rewind and use the whole token as the name. */
+        profile_cursor = backtrack_cursor;
+    }
+
+    struct aws_byte_cursor profile_name;
+    if (!s_parse_by_character_predicate(&profile_cursor, s_is_identifier, &profile_name, 0)) {
+        AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Profile declarations must contain a valid identifier for a name");
+        s_log_parse_context(AWS_LL_WARN, context);
+
+        context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+        return true;
+    }
+
+    /* In config files, only "[default]" may omit the "profile" keyword. */
+    if (context->profile_collection->profile_source == AWS_PST_CONFIG && !has_profile_prefix &&
+        !s_is_default_profile_name(&profile_name) && !has_sso_session_prefix) {
+        AWS_LOGF_WARN(
+            AWS_LS_SDKUTILS_PROFILE,
+            "Non-default profile declarations in config files must use the \"profile\" keyword");
+        s_log_parse_context(AWS_LL_WARN, context);
+
+        context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+        return true;
+    }
+
+    s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 0);
+
+    /*
+     * Special case the right side bracket check. We need to distinguish between a missing right bracket
+     * (fatal error) and invalid profile name (spaces, non-identifier characters).
+     *
+     * Do so by consuming all non right-bracket characters. If the remainder is empty it is missing,
+     * otherwise it is an invalid profile name (non-empty invalid_chars) or a good definition
+     * (empty invalid_chars cursor).
+     */
+    struct aws_byte_cursor invalid_chars;
+    s_parse_by_character_predicate(&profile_cursor, s_is_not_profile_end, &invalid_chars, 0);
+    if (profile_cursor.len == 0) {
+        AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Profile declaration missing required ending bracket");
+        s_log_parse_context(AWS_LL_WARN, context);
+
+        context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+        return true;
+    }
+
+    if (invalid_chars.len > 0) {
+        AWS_LOGF_WARN(
+            AWS_LS_SDKUTILS_PROFILE,
+            "Profile declaration contains invalid characters: \"" PRInSTR "\"",
+            AWS_BYTE_CURSOR_PRI(invalid_chars));
+        s_log_parse_context(AWS_LL_WARN, context);
+
+        context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+        return true;
+    }
+
+    /*
+     * Apply to the profile collection
+     */
+    if (s_profile_collection_add_profile(
+            context->profile_collection,
+            section_type,
+            &profile_name,
+            has_profile_prefix,
+            context,
+            &context->current_profile)) {
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Failed to add profile to profile collection");
+        s_log_parse_context(AWS_LL_ERROR, context);
+
+        context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+        return true;
+    }
+
+    return true;
+}
+
+/**
+ * Attempts to parse property continuation lines
+ *
+ * Return false if this is not a property continuation line, true otherwise (stop parsing the line)
+ *
+ * A continuation extends the value of context->current_property. When that
+ * property was declared with an empty value, the continuation is instead
+ * parsed as a "key = value" sub property definition.
+ */
+static bool s_parse_property_continuation(
+    const struct aws_byte_cursor *line_cursor,
+    struct profile_file_parse_context *context) {
+
+    /*
+     * Strip right-side whitespace only. Comments cannot be made on continuation lines. They
+     * get folded into the value.
+     */
+    struct aws_byte_cursor continuation_cursor = aws_byte_cursor_right_trim_pred(line_cursor, s_is_whitespace);
+
+    /*
+     * Can't be a continuation without at least one whitespace on the left
+     */
+    if (!s_parse_by_character_predicate(&continuation_cursor, s_is_whitespace, NULL, 0)) {
+        return false;
+    }
+
+    /*
+     * This should never happen since it should have been caught as a whitespace line
+     */
+    if (continuation_cursor.len == 0) {
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Property continuation internal parsing error");
+        s_log_parse_context(AWS_LL_ERROR, context);
+
+        context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+        return true;
+    }
+
+    /*
+     * A continuation without a current property is bad
+     */
+    if (context->current_profile == NULL || context->current_property == NULL) {
+        AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property continuation seen outside of a current property");
+        s_log_parse_context(AWS_LL_WARN, context);
+
+        context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+        return true;
+    }
+
+    /* Fold the (whitespace-stripped) line into the current property's value. */
+    if (s_profile_property_add_continuation(context->current_property, &continuation_cursor)) {
+        AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property continuation could not be applied to the current property");
+        s_log_parse_context(AWS_LL_WARN, context);
+
+        context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+        return true;
+    }
+
+    /* Empty-valued properties treat continuations as "key = value" sub property definitions. */
+    if (context->current_property->is_empty_valued) {
+
+        struct aws_byte_cursor key_cursor;
+        if (!s_parse_by_character_predicate(&continuation_cursor, s_is_not_assignment_operator, &key_cursor, 0)) {
+            AWS_LOGF_WARN(
+                AWS_LS_SDKUTILS_PROFILE, "Empty-valued property continuation must contain the assignment operator");
+            s_log_parse_context(AWS_LL_WARN, context);
+
+            context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+            return true;
+        }
+
+        if (!s_parse_by_character_predicate(&continuation_cursor, s_is_assignment_operator, NULL, 1)) {
+            AWS_LOGF_WARN(
+                AWS_LS_SDKUTILS_PROFILE, "Empty-valued property continuation must contain the assignment operator");
+            s_log_parse_context(AWS_LL_WARN, context);
+
+            context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+            return true;
+        }
+
+        struct aws_byte_cursor trimmed_key_cursor = aws_byte_cursor_right_trim_pred(&key_cursor, s_is_whitespace);
+        struct aws_byte_cursor id_check_cursor = aws_byte_cursor_trim_pred(&trimmed_key_cursor, s_is_identifier);
+        if (id_check_cursor.len > 0) {
+            AWS_LOGF_WARN(
+                AWS_LS_SDKUTILS_PROFILE,
+                "Empty-valued property continuation must have a valid identifier to the left of the assignment");
+            s_log_parse_context(AWS_LL_WARN, context);
+
+            context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+            return true;
+        }
+
+        s_parse_by_character_predicate(&continuation_cursor, s_is_whitespace, NULL, 0);
+
+        /*
+         * everything left in the continuation_cursor is the sub property value
+         */
+        if (s_profile_property_add_sub_property(
+                context->current_property, &trimmed_key_cursor, &continuation_cursor, context)) {
+            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Internal error adding sub property to current property");
+            s_log_parse_context(AWS_LL_ERROR, context);
+
+            context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+        }
+    }
+
+    return true;
+}
+
+/**
+ * Attempts to parse property lines
+ *
+ * Return false if this is not a property line, true otherwise (stop parsing the line)
+ *
+ * Expected shape: <identifier> <whitespace>? "=" <whitespace>? <value>,
+ * optionally followed by a whitespace-prefixed comment.
+ */
+static bool s_parse_property(const struct aws_byte_cursor *line_cursor, struct profile_file_parse_context *context) {
+
+    /*
+     * Strip whitespace-prefixed comment and right-side whitespace
+     */
+    struct aws_byte_cursor property_line_cursor = s_trim_trailing_whitespace_comment(line_cursor);
+    struct aws_byte_cursor property_cursor = aws_byte_cursor_right_trim_pred(&property_line_cursor, s_is_whitespace);
+
+    /* Any previous property stops being "current" once a new property line begins. */
+    context->current_property = NULL;
+
+    struct aws_byte_cursor key_cursor;
+    if (!s_parse_by_character_predicate(&property_cursor, s_is_not_assignment_operator, &key_cursor, 0)) {
+        AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition does not contain the assignment operator");
+        s_log_parse_context(AWS_LL_WARN, context);
+
+        context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+        return true;
+    }
+
+    /* The key must be a pure identifier once surrounding whitespace is removed. */
+    struct aws_byte_cursor trimmed_key_cursor = aws_byte_cursor_right_trim_pred(&key_cursor, s_is_whitespace);
+    struct aws_byte_cursor id_check_cursor = aws_byte_cursor_trim_pred(&trimmed_key_cursor, s_is_identifier);
+    if (id_check_cursor.len > 0) {
+        AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition does not begin with a valid identifier");
+        s_log_parse_context(AWS_LL_WARN, context);
+
+        context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+        return true;
+    }
+
+    if (!s_parse_by_character_predicate(&property_cursor, s_is_assignment_operator, NULL, 1)) {
+        AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition does not contain the assignment operator");
+        s_log_parse_context(AWS_LL_WARN, context);
+
+        context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+        return true;
+    }
+
+    s_parse_by_character_predicate(&property_cursor, s_is_whitespace, NULL, 0);
+
+    /*
+     * If appropriate, apply to the profile collection, property_cursor contains the trimmed value, if one exists
+     */
+    if (context->current_profile != NULL) {
+        context->current_property =
+            s_profile_add_property(context->current_profile, &trimmed_key_cursor, &property_cursor);
+        if (context->current_property == NULL) {
+            AWS_LOGF_ERROR(
+                AWS_LS_SDKUTILS_PROFILE,
+                "Failed to add property \"" PRInSTR "\" to current profile \"%s\"",
+                AWS_BYTE_CURSOR_PRI(trimmed_key_cursor),
+                context->current_profile->name->bytes);
+            s_log_parse_context(AWS_LL_ERROR, context);
+
+            context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+        }
+    } else {
+        /*
+         * By definition, if we haven't seen any profiles yet, this is a fatal error
+         */
+        if (context->has_seen_profile) {
+            AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition seen outside a profile");
+            s_log_parse_context(AWS_LL_WARN, context);
+
+            context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+        } else {
+            AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition seen before any profiles");
+            s_log_parse_context(AWS_LL_WARN, context);
+
+            context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+        }
+    }
+
+    return true;
+}
+
+static void s_parse_and_apply_line_to_profile_collection(
+    struct profile_file_parse_context *context,
+    const struct aws_byte_cursor *line_cursor) {
+
+    /*
+     * Ignore line feed on windows
+     */
+    struct aws_byte_cursor line = aws_byte_cursor_right_trim_pred(line_cursor, s_is_carriage_return);
+
+    /* Blank, whitespace-only, and full-line comment lines carry no data. */
+    if (line.len == 0 || s_is_comment_line(&line) || s_is_whitespace_line(&line)) {
+        return;
+    }
+
+    AWS_LOGF_TRACE(
+        AWS_LS_SDKUTILS_PROFILE,
+        "Parsing aws profile line in profile \"%s\", current property: \"%s\"",
+        context->current_profile ? context->current_profile->name->bytes : s_none_string->bytes,
+        context->current_property ? context->current_property->name->bytes : s_none_string->bytes);
+
+    /* Each parser returns true once it has claimed the line (successfully or not). */
+    if (s_parse_profile_declaration(&line, context) || s_parse_property_continuation(&line, context) ||
+        s_parse_property(&line, context)) {
+        return;
+    }
+
+    AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Unidentifiable line type encountered while parsing profile file");
+    s_log_parse_context(AWS_LL_WARN, context);
+
+    context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+}
+
+/*
+ * Allocates a collection, initializes one hash table per section type, and
+ * then parses the buffer line by line into the collection.
+ *
+ * Returns NULL on allocation failure or on a fatal parse error; recoverable
+ * parse errors skip the offending line and parsing continues.
+ */
+static struct aws_profile_collection *s_aws_profile_collection_new_internal(
+    struct aws_allocator *allocator,
+    const struct aws_byte_buf *buffer,
+    enum aws_profile_source_type source,
+    const struct aws_string *path) {
+
+    struct aws_profile_collection *profile_collection =
+        (struct aws_profile_collection *)aws_mem_acquire(allocator, sizeof(struct aws_profile_collection));
+    if (profile_collection == NULL) {
+        return NULL;
+    }
+
+    /* Zeroed up front so the cleanup path can run even if a later table init fails. */
+    AWS_ZERO_STRUCT(*profile_collection);
+
+    profile_collection->profile_source = source;
+    profile_collection->allocator = allocator;
+
+    aws_ref_count_init(
+        &profile_collection->ref_count,
+        profile_collection,
+        (aws_simple_completion_callback *)s_aws_profile_collection_destroy_internal);
+
+    for (int i = 0; i < AWS_PROFILE_SECTION_TYPE_COUNT; i++) {
+        if (aws_hash_table_init(
+                &profile_collection->sections[i],
+                allocator,
+                PROFILE_TABLE_DEFAULT_SIZE,
+                aws_hash_string,
+                aws_hash_callback_string_eq,
+                NULL, /* The key is owned by the value (and destroy cleans it up), so we don't have to */
+                s_profile_hash_table_value_destroy)) {
+            goto cleanup;
+        }
+    }
+
+    struct aws_byte_cursor current_position = aws_byte_cursor_from_buf(buffer);
+
+    if (current_position.len > 0) {
+        struct aws_byte_cursor line_cursor;
+        AWS_ZERO_STRUCT(line_cursor);
+
+        struct profile_file_parse_context context;
+        AWS_ZERO_STRUCT(context);
+        context.current_line_number = 1;
+        context.profile_collection = profile_collection;
+        context.source_file_path = path;
+
+        /* Split on '\n'; any trailing '\r' is stripped later by the line parser. */
+        while (aws_byte_cursor_next_split(&current_position, '\n', &line_cursor)) {
+            context.current_line = line_cursor;
+
+            s_parse_and_apply_line_to_profile_collection(&context, &line_cursor);
+            if (context.parse_error == AWS_ERROR_SDKUTILS_PARSE_FATAL) {
+                AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Fatal error while parsing aws profile collection");
+                goto cleanup;
+            }
+
+            /* Advance past the line plus its '\n' delimiter. */
+            aws_byte_cursor_advance(&current_position, line_cursor.len + 1);
+            ++context.current_line_number;
+        }
+    }
+
+    return profile_collection;
+
+cleanup:
+    s_aws_profile_collection_destroy_internal(profile_collection);
+
+    return NULL;
+}
+
+struct aws_profile_collection *aws_profile_collection_acquire(struct aws_profile_collection *collection) {
+    /* NULL-tolerant: acquiring a NULL collection is a no-op that returns NULL. */
+    if (collection == NULL) {
+        return NULL;
+    }
+
+    aws_ref_count_acquire(&collection->ref_count);
+    return collection;
+}
+
+struct aws_profile_collection *aws_profile_collection_release(struct aws_profile_collection *collection) {
+    /* NULL-tolerant; always returns NULL so callers can write `c = release(c);`. */
+    if (collection == NULL) {
+        return NULL;
+    }
+
+    /* May destroy the collection when this was the last outstanding reference. */
+    aws_ref_count_release(&collection->ref_count);
+    return NULL;
+}
+
+struct aws_profile_collection *aws_profile_collection_new_from_file(
+    struct aws_allocator *allocator,
+    const struct aws_string *file_path,
+    enum aws_profile_source_type source) {
+
+    AWS_LOGF_DEBUG(AWS_LS_SDKUTILS_PROFILE, "Creating profile collection from file at \"%s\"", file_path->bytes);
+
+    /* Slurp the whole file into memory, then hand it to the buffer-based parser. */
+    struct aws_byte_buf file_contents;
+    AWS_ZERO_STRUCT(file_contents);
+    if (aws_byte_buf_init_from_file(&file_contents, allocator, aws_string_c_str(file_path)) != 0) {
+        AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Failed to read file at \"%s\"", file_path->bytes);
+        return NULL;
+    }
+
+    struct aws_profile_collection *collection =
+        s_aws_profile_collection_new_internal(allocator, &file_contents, source, file_path);
+
+    /* The parser copies what it needs; the raw file contents can go now. */
+    aws_byte_buf_clean_up(&file_contents);
+
+    return collection;
+}
+
+struct aws_profile_collection *aws_profile_collection_new_from_buffer(
+    struct aws_allocator *allocator,
+    const struct aws_byte_buf *buffer,
+    enum aws_profile_source_type source) {
+
+    /* In-memory variant: no source file path is associated with the collection. */
+    struct aws_profile_collection *collection = s_aws_profile_collection_new_internal(allocator, buffer, source, NULL);
+
+    return collection;
+}
+
+/*
+ * Normalizes a profile file path for the local platform:
+ *  - rewrites every directory separator to the platform separator
+ *  - expands a leading lone '~' segment to the user's home directory
+ *
+ * Returns a newly allocated string owned by the caller, or NULL on failure.
+ *
+ * Fix: the return value of aws_byte_buf_init() was previously ignored; a
+ * failed allocation now bails out instead of falling through to the appends.
+ */
+static struct aws_string *s_process_profile_file_path(struct aws_allocator *allocator, const struct aws_string *path) {
+    struct aws_string *final_path = NULL;
+
+    /*
+     * Make a copy to mess with
+     */
+    struct aws_string *path_copy = aws_string_new_from_string(allocator, path);
+    if (path_copy == NULL) {
+        return NULL;
+    }
+
+    struct aws_string *home_directory = NULL;
+
+    /*
+     * Fake directory cursor for final directory construction
+     */
+    char local_platform_separator = aws_get_platform_directory_separator();
+    struct aws_byte_cursor separator_cursor;
+    AWS_ZERO_STRUCT(separator_cursor);
+    separator_cursor.ptr = (uint8_t *)&local_platform_separator;
+    separator_cursor.len = 1;
+
+    /* Rewrite every separator in-place to the platform separator. */
+    for (size_t i = 0; i < path_copy->len; ++i) {
+        char value = path_copy->bytes[i];
+        if (aws_is_any_directory_separator(value)) {
+            ((char *)(path_copy->bytes))[i] = local_platform_separator;
+        }
+    }
+
+    /*
+     * Process a split on the local separator, which we now know is the only one present in the string.
+     *
+     * While this does not conform fully to the SEP governing profile file path resolution, it covers
+     * a useful, cross-platform subset of functionality that the full implementation will be backwards compatible with.
+     */
+    struct aws_array_list path_segments;
+    if (aws_array_list_init_dynamic(&path_segments, allocator, 10, sizeof(struct aws_byte_cursor))) {
+        goto on_array_list_init_failure;
+    }
+
+    struct aws_byte_cursor path_cursor = aws_byte_cursor_from_string(path_copy);
+    if (aws_byte_cursor_split_on_char(&path_cursor, local_platform_separator, &path_segments)) {
+        goto on_split_failure;
+    }
+
+    /* First pass: compute the exact final length (home expansion + separators). */
+    size_t final_string_length = 0;
+    size_t path_segment_count = aws_array_list_length(&path_segments);
+    for (size_t i = 0; i < path_segment_count; ++i) {
+        struct aws_byte_cursor segment_cursor;
+        AWS_ZERO_STRUCT(segment_cursor);
+
+        if (aws_array_list_get_at(&path_segments, &segment_cursor, i)) {
+            continue;
+        }
+
+        /*
+         * Current support: if and only if the first segment is just '~' then replace it
+         * with the current home directory based on SEP home directory resolution rules.
+         *
+         * Support for (pathological but proper) paths with embedded ~ ("../../~/etc...") and
+         * cross-user ~ ("~someone/.aws/credentials") can come later. As it stands, they will
+         * potentially succeed on unix platforms but not Windows.
+         */
+        if (i == 0 && segment_cursor.len == 1 && *segment_cursor.ptr == '~') {
+            if (home_directory == NULL) {
+                home_directory = aws_get_home_directory(allocator);
+
+                if (AWS_UNLIKELY(!home_directory)) {
+                    goto on_home_directory_failure;
+                }
+            }
+
+            final_string_length += home_directory->len;
+        } else {
+            final_string_length += segment_cursor.len;
+        }
+    }
+
+    /* One separator between each pair of adjacent segments. */
+    if (path_segment_count > 1) {
+        final_string_length += path_segment_count - 1;
+    }
+
+    if (final_string_length == 0) {
+        goto on_empty_path;
+    }
+
+    /*
+     * Build the final path from the split + a possible home directory resolution
+     */
+    struct aws_byte_buf result;
+    if (aws_byte_buf_init(&result, allocator, final_string_length)) {
+        /* Nothing was allocated into 'result'; skip its cleanup label. */
+        goto on_empty_path;
+    }
+    for (size_t i = 0; i < path_segment_count; ++i) {
+        struct aws_byte_cursor segment_cursor;
+        AWS_ZERO_STRUCT(segment_cursor);
+
+        if (aws_array_list_get_at(&path_segments, &segment_cursor, i)) {
+            continue;
+        }
+
+        /*
+         * See above for explanation
+         */
+        if (i == 0 && segment_cursor.len == 1 && *segment_cursor.ptr == '~') {
+            if (home_directory == NULL) {
+                goto on_home_directory_failure;
+            }
+            struct aws_byte_cursor home_cursor = aws_byte_cursor_from_string(home_directory);
+            if (aws_byte_buf_append(&result, &home_cursor)) {
+                goto on_byte_buf_write_failure;
+            }
+        } else {
+            if (aws_byte_buf_append(&result, &segment_cursor)) {
+                goto on_byte_buf_write_failure;
+            }
+        }
+
+        /*
+         * Add the separator after all but the last segment
+         */
+        if (i + 1 < path_segment_count) {
+            if (aws_byte_buf_append(&result, &separator_cursor)) {
+                goto on_byte_buf_write_failure;
+            }
+        }
+    }
+
+    final_path = aws_string_new_from_array(allocator, result.buffer, result.len);
+
+/*
+ * clean up
+ */
+on_byte_buf_write_failure:
+    aws_byte_buf_clean_up(&result);
+
+on_empty_path:
+on_home_directory_failure:
+on_split_failure:
+    aws_array_list_clean_up(&path_segments);
+
+on_array_list_init_failure:
+    aws_string_destroy(path_copy);
+
+    if (home_directory != NULL) {
+        aws_string_destroy(home_directory);
+    }
+
+    return final_path;
+}
+
+/* Default on-disk locations and their environment-variable overrides for the two profile files. */
+AWS_STATIC_STRING_FROM_LITERAL(s_default_credentials_path, "~/.aws/credentials");
+AWS_STATIC_STRING_FROM_LITERAL(s_credentials_file_path_env_variable_name, "AWS_SHARED_CREDENTIALS_FILE");
+
+AWS_STATIC_STRING_FROM_LITERAL(s_default_config_path, "~/.aws/config");
+AWS_STATIC_STRING_FROM_LITERAL(s_config_file_path_env_variable_name, "AWS_CONFIG_FILE");
+
+/*
+ * Resolves the raw (unexpanded) profile file path, in priority order:
+ * explicit override cursor, then environment variable, then built-in default.
+ */
+static struct aws_string *s_get_raw_file_path(
+    struct aws_allocator *allocator,
+    const struct aws_byte_cursor *override_path,
+    const struct aws_string *override_env_var_name,
+    const struct aws_string *default_path) {
+
+    /* 1) A caller-supplied override wins outright. */
+    if (override_path != NULL && override_path->ptr != NULL) {
+        return aws_string_new_from_array(allocator, override_path->ptr, override_path->len);
+    }
+
+    /* 2) Otherwise, the environment variable when it is set. */
+    struct aws_string *env_path = NULL;
+    if (aws_get_environment_value(allocator, override_env_var_name, &env_path) == 0 && env_path != NULL) {
+        return env_path;
+    }
+
+    /* 3) Finally, fall back to the default location. */
+    return aws_string_new_from_string(allocator, default_path);
+}
+
+struct aws_string *aws_get_credentials_file_path(
+    struct aws_allocator *allocator,
+    const struct aws_byte_cursor *override_path) {
+
+    /* Resolve the unexpanded path first, then normalize separators and expand '~'. */
+    struct aws_string *unexpanded_path = s_get_raw_file_path(
+        allocator, override_path, s_credentials_file_path_env_variable_name, s_default_credentials_path);
+
+    struct aws_string *resolved_path = s_process_profile_file_path(allocator, unexpanded_path);
+    aws_string_destroy(unexpanded_path);
+
+    return resolved_path;
+}
+
+struct aws_string *aws_get_config_file_path(
+    struct aws_allocator *allocator,
+    const struct aws_byte_cursor *override_path) {
+
+    /* Resolve the unexpanded path first, then normalize separators and expand '~'. */
+    struct aws_string *unexpanded_path =
+        s_get_raw_file_path(allocator, override_path, s_config_file_path_env_variable_name, s_default_config_path);
+
+    struct aws_string *resolved_path = s_process_profile_file_path(allocator, unexpanded_path);
+    aws_string_destroy(unexpanded_path);
+
+    return resolved_path;
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_default_profile_env_variable_name, "AWS_PROFILE");
+
+struct aws_string *aws_get_profile_name(struct aws_allocator *allocator, const struct aws_byte_cursor *override_name) {
+
+    /* AWS_PROFILE from the environment takes precedence over any caller override. */
+    struct aws_string *name_from_env = NULL;
+    if (aws_get_environment_value(allocator, s_default_profile_env_variable_name, &name_from_env) == 0 &&
+        name_from_env != NULL) {
+        return name_from_env;
+    }
+
+    /* Next, an explicit override; otherwise fall back to the default profile name. */
+    if (override_name != NULL && override_name->ptr != NULL) {
+        return aws_string_new_from_array(allocator, override_name->ptr, override_name->len);
+    }
+
+    return aws_string_new_from_string(allocator, s_default_profile_name);
+}
+
+size_t aws_profile_get_property_count(const struct aws_profile *profile) {
+    /* Number of key/value properties stored directly on this profile. */
+    const struct aws_hash_table *properties = &profile->properties;
+    return aws_hash_table_get_entry_count(properties);
+}
+
+size_t aws_profile_collection_get_profile_count(const struct aws_profile_collection *profile_collection) {
+    /* Profiles live in the dedicated PROFILE section table. */
+    const struct aws_hash_table *profiles = &profile_collection->sections[AWS_PROFILE_SECTION_TYPE_PROFILE];
+    return aws_hash_table_get_entry_count(profiles);
+}
+
+size_t aws_profile_collection_get_section_count(
+    const struct aws_profile_collection *profile_collection,
+    const enum aws_profile_section_type section_type) {
+    /* NOTE(review): section_type is not range-checked; callers must pass a valid enum value. */
+    const struct aws_hash_table *section = &profile_collection->sections[section_type];
+    return aws_hash_table_get_entry_count(section);
+}
+
+size_t aws_profile_property_get_sub_property_count(const struct aws_profile_property *property) {
+    /* Number of "key = value" sub properties attached to this property. */
+    const struct aws_hash_table *sub_properties = &property->sub_properties;
+    return aws_hash_table_get_entry_count(sub_properties);
+}
+
+const struct aws_string *aws_profile_property_get_sub_property(
+    const struct aws_profile_property *property,
+    const struct aws_string *sub_property_name) {
+
+    struct aws_hash_element *element = NULL;
+    int find_result = aws_hash_table_find(&property->sub_properties, sub_property_name, &element);
+
+    /* Treat both a lookup error and a missing key as "not present". */
+    if (find_result != 0 || element == NULL) {
+        return NULL;
+    }
+
+    return (const struct aws_string *)element->value;
+}
+
+const struct aws_string *aws_profile_get_name(const struct aws_profile *profile) {
+    /* Returns the profile's internal name string (not a copy); profile must be non-NULL. */
+    AWS_PRECONDITION(profile);
+    return profile->name;
+}
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
new file mode 100644
index 00000000000..556450b6978
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
@@ -0,0 +1,1132 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/json.h>
+#include <aws/common/macros.h>
+#include <aws/common/string.h>
+#include <aws/sdkutils/partitions.h>
+#include <aws/sdkutils/private/endpoints_types_impl.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+/* TODO: checking for unknown enum values is annoying and is brittle. compile
+time assert on enum size or members would make it a lot simpler. */
+
+/*
+ * How rule resolution works.
+ * Note: read comments in endpoint_types_impl.h first to understand type system.
+ *
+ * Initial scope is created from parameters defined in request context and
+ * default values defined in ruleset (s_init_top_level_scope). Validation that
+ * all required parameters have values is done at this point as well.
+ *
+ * Rules are then resolved sequentially against scope.
+ * First list of conditions associated with the rule is resolved
+ * (s_resolve_conditions). Final result of conditions resolution is an AND of
+ * truthiness of resolved values (as defined in is_value_truthy) for each
+ * condition. If resolution is true then rule is selected.
+ * - For endpoint and error rules that means terminal state is reached and rule
+ * data is returned
+ * - For tree rule, the engine starts resolving rules associated with tree rule.
+ * Note: tree rules are terminal and once engine jumps into tree rule
+ * resolution there is no way to jump back out.
+ *
+ * Conditions can add values to scope. Those values are valid for the duration of
+ * rule resolution. Note: for tree rules, any values added in tree conditions are
+ * valid for all rules within the tree.
 * Scope can be thought of as a 'leveled' structure. Top level or 0 level
+ * represents all values from context and defaults. Levels 1 and up represent
+ * values added by rules. Ex. if we start at level 0, all values added by rule
 * can be thought of as level 1.
+ * Since tree rule cannot be exited from, engine is simplified by making all
+ * values in scope top level whenever tree is jumped into. So in practice engine
+ * goes back between top level and first level as resolving rules. If that
+ * changes in future, scope can add explicit level number and cleanup only values
+ * at that level when going to next rule.
+ *
+ * Overall flow is as follows:
+ * - Start with any values provided in context as scope
+ * - Add any default values provided in ruleset and validate all required
+ * params are specified.
+ * - Iterate through rules and resolve each rule:
+ * -- resolve conditions with side effects
+ * -- if conditions are truthy return rule result
+ * -- if conditions are truthy and rule is tree, jump down a level and
+ * restart resolution with tree rules
+ * -- if conditions are falsy, rollback level and go to next rule
+ * - if no rules match, resolution fails with exhausted error.
+ */
+
/* Callback context handed to the templated-string resolver: bundles the
 * allocator and the resolution scope used to look up referenced values. */
struct resolve_template_callback_data {
    struct aws_allocator *allocator;
    struct aws_endpoints_resolution_scope *scope;
};
+
+AWS_STATIC_ASSERT(AWS_ENDPOINTS_VALUE_SIZE == 7);
+static bool is_value_truthy(const struct aws_endpoints_value *value) {
+ switch (value->type) {
+ case AWS_ENDPOINTS_VALUE_NONE:
+ return false;
+ case AWS_ENDPOINTS_VALUE_BOOLEAN:
+ return value->v.boolean;
+ case AWS_ENDPOINTS_VALUE_ARRAY:
+ case AWS_ENDPOINTS_VALUE_STRING:
+ case AWS_ENDPOINTS_VALUE_OBJECT:
+ return true;
+ case AWS_ENDPOINTS_VALUE_NUMBER:
+ return value->v.number != 0;
+ default:
+ AWS_ASSERT(false);
+ return false;
+ }
+}
+
/* Hash-table value destructor: forwards to the scope-value destroy routine.
 * Fix: declared static -- this is a file-local helper (s_ prefix, matching the
 * file's convention) and must not be given external linkage. */
static void s_scope_value_destroy_cb(void *data) {
    struct aws_endpoints_scope_value *value = data;
    aws_endpoints_scope_value_destroy(value);
}
+
/*
 * Deep-copies every value from the request context's table into the scope's
 * table, so rule resolution can add/remove scope values without touching the
 * caller-owned context.
 * Returns AWS_OP_SUCCESS, or raises
 * AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED on copy/insert failure.
 */
static int s_deep_copy_context_to_scope(
    struct aws_allocator *allocator,
    const struct aws_endpoints_request_context *context,
    struct aws_endpoints_resolution_scope *scope) {

    /* Declared outside the loop so the error path can free the value that was
       being built when the failure occurred (not yet owned by the table). */
    struct aws_endpoints_scope_value *new_value = NULL;

    for (struct aws_hash_iter iter = aws_hash_iter_begin(&context->values); !aws_hash_iter_done(&iter);
         aws_hash_iter_next(&iter)) {

        struct aws_endpoints_scope_value *context_value = (struct aws_endpoints_scope_value *)iter.element.value;

        new_value = aws_endpoints_scope_value_new(allocator, context_value->name.cur);
        if (aws_endpoints_deep_copy_parameter_value(allocator, &context_value->value, &new_value->value)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to deep copy value.");
            goto on_error;
        }

        /* On success the scope's table takes ownership of new_value
           (s_scope_value_destroy_cb destroys entries on cleanup). */
        if (aws_hash_table_put(&scope->values, &new_value->name.cur, new_value, NULL)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add deep copy to scope.");
            goto on_error;
        }
    }

    return AWS_OP_SUCCESS;

on_error:
    aws_endpoints_scope_value_destroy(new_value);
    return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED);
}
+
+static int s_init_top_level_scope(
+ struct aws_allocator *allocator,
+ const struct aws_endpoints_request_context *context,
+ const struct aws_endpoints_ruleset *ruleset,
+ const struct aws_partitions_config *partitions,
+ struct aws_endpoints_resolution_scope *scope) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(context);
+ AWS_PRECONDITION(ruleset);
+ AWS_PRECONDITION(scope);
+
+ struct aws_endpoints_scope_value *val = NULL;
+ scope->rule_idx = 0;
+ scope->rules = &ruleset->rules;
+ scope->partitions = partitions;
+
+ if (aws_hash_table_init(
+ &scope->values,
+ allocator,
+ 0,
+ aws_hash_byte_cursor_ptr,
+ aws_endpoints_byte_cursor_eq,
+ NULL,
+ s_scope_value_destroy_cb)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init request context values.");
+ goto on_error;
+ }
+
+ if (s_deep_copy_context_to_scope(allocator, context, scope)) {
+ goto on_error;
+ }
+
+ if (aws_array_list_init_dynamic(&scope->added_keys, allocator, 10, sizeof(struct aws_byte_cursor))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init added keys.");
+ goto on_error;
+ }
+
+ /* Add defaults to the top level scope. */
+ for (struct aws_hash_iter iter = aws_hash_iter_begin(&ruleset->parameters); !aws_hash_iter_done(&iter);
+ aws_hash_iter_next(&iter)) {
+ const struct aws_byte_cursor key = *(const struct aws_byte_cursor *)iter.element.key;
+ struct aws_endpoints_parameter *value = (struct aws_endpoints_parameter *)iter.element.value;
+
+ /* Skip non-required values, since they cannot have default values. */
+ if (!value->is_required) {
+ continue;
+ }
+
+ struct aws_hash_element *existing = NULL;
+ if (aws_hash_table_find(&scope->values, &key, &existing)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init request context values.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED);
+ }
+
+ if (existing == NULL) {
+ if (!value->has_default_value) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "No value or default for required parameter.");
+ goto on_error;
+ }
+
+ val = aws_endpoints_scope_value_new(allocator, key);
+ AWS_ASSERT(val);
+
+ switch (value->type) {
+ case AWS_ENDPOINTS_PARAMETER_STRING:
+ val->value.type = AWS_ENDPOINTS_VALUE_STRING;
+ val->value.v.owning_cursor_string =
+ aws_endpoints_non_owning_cursor_create(value->default_value.string);
+ break;
+ case AWS_ENDPOINTS_PARAMETER_BOOLEAN:
+ val->value.type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ val->value.v.boolean = value->default_value.boolean;
+ break;
+ default:
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Unexpected parameter type.");
+ goto on_error;
+ }
+
+ if (aws_hash_table_put(&scope->values, &val->name.cur, val, NULL)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add value to top level scope.");
+ goto on_error;
+ }
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_scope_value_destroy(val);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED);
+}
+
+static void s_scope_clean_up(struct aws_endpoints_resolution_scope *scope) {
+ AWS_PRECONDITION(scope);
+
+ aws_hash_table_clean_up(&scope->values);
+ aws_array_list_clean_up(&scope->added_keys);
+}
+
+static int s_resolve_expr(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_expr *expr,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value);
+
+static int s_resolve_template(
+ struct aws_byte_cursor template,
+ void *user_data,
+ struct aws_owning_cursor *out_owning_cursor);
+
/*
 * Fetches argv[idx], resolves it against the scope, and checks the resolved
 * value's type.
 * expected_type of AWS_ENDPOINTS_VALUE_ANY skips the type check.
 * On success out_value receives the resolved value (caller must clean it up);
 * on failure it is zeroed and AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED is
 * raised.
 */
int aws_endpoints_argv_expect(
    struct aws_allocator *allocator,
    struct aws_endpoints_resolution_scope *scope,
    struct aws_array_list *argv,
    size_t idx,
    enum aws_endpoints_value_type expected_type,
    struct aws_endpoints_value *out_value) {

    AWS_ZERO_STRUCT(*out_value);
    struct aws_endpoints_value argv_value = {0};
    struct aws_endpoints_expr argv_expr;
    if (aws_array_list_get_at(argv, &argv_expr, idx)) {
        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to parse argv");
        goto on_error;
    }

    if (s_resolve_expr(allocator, &argv_expr, scope, &argv_value)) {
        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve argv.");
        goto on_error;
    }

    if (expected_type != AWS_ENDPOINTS_VALUE_ANY && argv_value.type != expected_type) {
        AWS_LOGF_ERROR(
            AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE,
            "Unexpected arg type actual: %u expected %u.",
            argv_value.type,
            expected_type);
        goto on_error;
    }

    /* Transfer the resolved value (and any memory it owns) to the caller. */
    *out_value = argv_value;
    return AWS_OP_SUCCESS;

on_error:
    aws_endpoints_value_clean_up(&argv_value);
    return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
}
+
+/*
+******************************
+* Expr/String resolve
+******************************
+*/
+
/*
 * Resolves a single expression against the scope into out_value.
 * Literals (bool/number) are copied; string literals have their {templates}
 * expanded; references are looked up in the scope (missing references resolve
 * to NONE, not an error); function expressions are dispatched to the standard
 * library implementation.
 * Ownership: string results own their memory; referenced values remain owned
 * by the scope (the returned cursor is non-owning); arrays are currently a
 * shallow borrow (see TODO below).
 */
static int s_resolve_expr(
    struct aws_allocator *allocator,
    struct aws_endpoints_expr *expr,
    struct aws_endpoints_resolution_scope *scope,
    struct aws_endpoints_value *out_value) {

    AWS_ZERO_STRUCT(*out_value);
    switch (expr->type) {
        case AWS_ENDPOINTS_EXPR_STRING: {
            struct aws_byte_buf buf;
            struct resolve_template_callback_data data = {.allocator = allocator, .scope = scope};
            if (aws_byte_buf_init_from_resolved_templated_string(
                    allocator, &buf, expr->e.string, s_resolve_template, &data, false)) {
                AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated string.");
                goto on_error;
            }

            /* Copy the expanded buffer into an owned string, then release the buffer. */
            out_value->type = AWS_ENDPOINTS_VALUE_STRING;
            out_value->v.owning_cursor_string =
                aws_endpoints_owning_cursor_from_string(aws_string_new_from_buf(allocator, &buf));
            aws_byte_buf_clean_up(&buf);
            break;
        }
        case AWS_ENDPOINTS_EXPR_BOOLEAN: {
            out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
            out_value->v.boolean = expr->e.boolean;
            break;
        }
        case AWS_ENDPOINTS_EXPR_NUMBER: {
            out_value->type = AWS_ENDPOINTS_VALUE_NUMBER;
            out_value->v.number = expr->e.number;
            break;
        }
        case AWS_ENDPOINTS_EXPR_ARRAY: {
            out_value->type = AWS_ENDPOINTS_VALUE_ARRAY;
            /* TODO: deep copy */
            out_value->v.array = expr->e.array;
            break;
        }
        case AWS_ENDPOINTS_EXPR_REFERENCE: {
            struct aws_hash_element *element;
            if (aws_hash_table_find(&scope->values, &expr->e.reference, &element)) {
                AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to deref.");
                goto on_error;
            }

            if (element == NULL) {
                /* Unbound reference is not an error; it resolves to NONE. */
                out_value->type = AWS_ENDPOINTS_VALUE_NONE;
            } else {
                struct aws_endpoints_scope_value *aws_endpoints_scope_value = element->value;
                *out_value = aws_endpoints_scope_value->value;
                if (aws_endpoints_scope_value->value.type == AWS_ENDPOINTS_VALUE_STRING) {
                    /* Value will not own underlying mem and instead its owned
                    by the scope, so set it to NULL. */
                    out_value->v.owning_cursor_string.string = NULL;
                } else if (aws_endpoints_scope_value->value.type == AWS_ENDPOINTS_VALUE_OBJECT) {
                    out_value->v.owning_cursor_object.string = NULL;
                }
            }
            break;
        }
        case AWS_ENDPOINTS_EXPR_FUNCTION: {
            if (aws_endpoints_dispatch_standard_lib_fn_resolve(
                    expr->e.function.fn, allocator, &expr->e.function.argv, scope, out_value)) {
                goto on_error;
            }
            break;
        }
    }

    return AWS_OP_SUCCESS;

on_error:
    return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
}
+
/*
 * Resolves one condition and reports its truthiness in out_is_truthy.
 * When the condition is truthy and has an `assign` name, the resolved value is
 * pushed into the scope (which takes ownership) and the key is recorded in
 * scope->added_keys so s_revert_scope can remove it later. Shadowing an
 * existing scope value is an error.
 */
static int s_resolve_one_condition(
    struct aws_allocator *allocator,
    struct aws_endpoints_condition *condition,
    struct aws_endpoints_resolution_scope *scope,
    bool *out_is_truthy) {

    struct aws_endpoints_scope_value *scope_value = NULL;

    /* s_resolve_expr zeroes val before writing it, so no init is needed here. */
    struct aws_endpoints_value val;
    if (s_resolve_expr(allocator, &condition->expr, scope, &val)) {
        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve expr.");
        goto on_error;
    }

    *out_is_truthy = is_value_truthy(&val);

    /* Note: assigning value is skipped if condition is falsy, since nothing can
    use it and that avoids adding value and then removing it from scope right away. */
    if (*out_is_truthy && condition->assign.len > 0) {
        /* If condition assigns a value, push it to scope and let scope
        handle value memory. */
        scope_value = aws_endpoints_scope_value_new(allocator, condition->assign);
        scope_value->value = val;

        if (aws_array_list_push_back(&scope->added_keys, &scope_value->name.cur)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to update key at given scope.");
            goto on_error;
        }

        int was_created = 1;
        if (aws_hash_table_put(&scope->values, &scope_value->name.cur, scope_value, &was_created)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to set assigned variable.");
            goto on_error;
        }

        /* Shadowing existing values is prohibited. */
        /* NOTE(review): at this point the table owns scope_value, but the
           on_error path below destroys it through the local pointer as well --
           confirm aws_hash_table_put/clean_up semantics to rule out a double
           free on this (shadowing) error path. */
        if (!was_created) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Assigned variable shadows existing one.");
            goto on_error;
        }
    } else {
        /* Otherwise clean up temp value */
        aws_endpoints_value_clean_up(&val);
    }

    return AWS_OP_SUCCESS;

on_error:
    aws_endpoints_scope_value_destroy(scope_value);
    /* Only cleanup value if mem ownership was not transferred to scope value. */
    if (scope_value == NULL) {
        aws_endpoints_value_clean_up(&val);
    }

    *out_is_truthy = false;
    return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
}
+
+static int s_resolve_conditions(
+ struct aws_allocator *allocator,
+ const struct aws_array_list *conditions,
+ struct aws_endpoints_resolution_scope *scope,
+ bool *out_is_truthy) {
+
+ /* Note: spec defines empty conditions list as truthy. */
+ *out_is_truthy = true;
+
+ for (size_t idx = 0; idx < aws_array_list_length(conditions); ++idx) {
+ struct aws_endpoints_condition *condition = NULL;
+ if (aws_array_list_get_at_ptr(conditions, (void **)&condition, idx)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to retrieve condition.");
+ goto on_error;
+ }
+
+ if (s_resolve_one_condition(allocator, condition, scope, out_is_truthy)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve condition.");
+ goto on_error;
+ }
+
+ /* truthiness of all conditions is an AND of truthiness for each condition,
+ hence first false one short circuits resolution */
+ if (!*out_is_truthy) {
+ break;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ *out_is_truthy = false;
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+int aws_endpoints_path_through_array(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *value,
+ struct aws_byte_cursor path_cur,
+ struct aws_endpoints_value *out_value) {
+
+ AWS_PRECONDITION(value->type == AWS_ENDPOINTS_VALUE_ARRAY);
+
+ uint64_t index;
+ struct aws_byte_cursor split = {0};
+ if ((!aws_byte_cursor_next_split(&path_cur, '[', &split) || split.len > 0) ||
+ !aws_byte_cursor_next_split(&path_cur, ']', &split) || aws_byte_cursor_utf8_parse_u64(split, &index)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Could not parse index from template string.");
+ goto on_error;
+ }
+
+ if (index < aws_array_list_length(&value->v.array)) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_endpoints_expr *expr = NULL;
+ if (aws_array_list_get_at_ptr(&value->v.array, (void **)&expr, (size_t)index)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to index into resolved value");
+ goto on_error;
+ }
+
+ struct aws_endpoints_value val;
+ if (s_resolve_expr(allocator, expr, scope, &val)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve val.");
+ aws_endpoints_value_clean_up(&val);
+ goto on_error;
+ }
+
+ *out_value = val;
+ return AWS_OP_SUCCESS;
+
+on_error:
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+int aws_endpoints_path_through_object(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_value *value,
+ struct aws_byte_cursor path_cur,
+ struct aws_endpoints_value *out_value) {
+
+ AWS_ZERO_STRUCT(*out_value);
+ struct aws_json_value *root_node = NULL;
+
+ struct aws_byte_cursor value_cur = value->type != AWS_ENDPOINTS_VALUE_STRING ? value->v.owning_cursor_string.cur
+ : value->v.owning_cursor_object.cur;
+
+ root_node = aws_json_value_new_from_string(allocator, value_cur);
+ const struct aws_json_value *result;
+ if (aws_path_through_json(allocator, root_node, path_cur, &result)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through json.");
+ goto on_error;
+ }
+
+ if (result == NULL) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ } else if (aws_json_value_is_string(result)) {
+ struct aws_byte_cursor final;
+ if (aws_json_value_get_string(result, &final)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Could not parse string from node.");
+ goto on_error;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_STRING;
+ out_value->v.owning_cursor_string = aws_endpoints_owning_cursor_from_cursor(allocator, final);
+ } else if (aws_json_value_is_array(result) || aws_json_value_is_object(result)) {
+ struct aws_byte_buf json_blob;
+ aws_byte_buf_init(&json_blob, allocator, 0);
+
+ if (aws_byte_buf_append_json_string(result, &json_blob)) {
+ aws_byte_buf_clean_up(&json_blob);
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to extract properties.");
+ goto on_error;
+ }
+
+ aws_byte_buf_clean_up(&json_blob);
+ out_value->type = AWS_ENDPOINTS_VALUE_OBJECT;
+ out_value->v.owning_cursor_object =
+ aws_endpoints_owning_cursor_from_string(aws_string_new_from_buf(allocator, &json_blob));
+ } else if (aws_json_value_is_boolean(result)) {
+ if (aws_json_value_get_boolean(result, &out_value->v.boolean)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Could not parse boolean from node.");
+ goto on_error;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ } else if (aws_json_value_is_number(result)) {
+ if (aws_json_value_get_number(result, &out_value->v.number)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Could not parse number from node.");
+ goto on_error;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_NUMBER;
+ }
+
+ aws_json_value_destroy(root_node);
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_json_value_destroy(root_node);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
/*
 * Resolves a single "{name#path}" template occurrence:
 *  - the part before '#' names a scope value (must exist);
 *  - with no '#', the value must already be a string and is borrowed directly;
 *  - with a '#path', object values are pathed through JSON and array values
 *    are pathed by index, and the result must be a string.
 * On success out_owning_cursor receives the resulting string, owning or
 * borrowing per the branches below.
 */
static int s_resolve_templated_value_with_pathing(
    struct aws_allocator *allocator,
    struct aws_endpoints_resolution_scope *scope,
    struct aws_byte_cursor template_cur,
    struct aws_owning_cursor *out_owning_cursor) {

    struct aws_endpoints_value resolved_value = {0};
    struct aws_byte_cursor split = {0};
    if (!aws_byte_cursor_next_split(&template_cur, '#', &split) || split.len == 0) {
        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Invalid value in template string.");
        goto on_error;
    }

    struct aws_hash_element *elem = NULL;
    if (aws_hash_table_find(&scope->values, &split, &elem) || elem == NULL) {
        AWS_LOGF_ERROR(
            AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Templated value does not exist: " PRInSTR, AWS_BYTE_CURSOR_PRI(split));
        goto on_error;
    }

    struct aws_endpoints_scope_value *scope_value = elem->value;
    if (!aws_byte_cursor_next_split(&template_cur, '#', &split)) {
        /* No pathing segment: the value itself must be a string and is
           borrowed from the scope (non-owning cursor). */
        if (scope_value->value.type != AWS_ENDPOINTS_VALUE_STRING) {
            AWS_LOGF_ERROR(
                AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Unexpected type: must be string if pathing is not provided");
            goto on_error;
        }

        *out_owning_cursor = aws_endpoints_non_owning_cursor_create(scope_value->value.v.owning_cursor_string.cur);
        return AWS_OP_SUCCESS;
    }

    if (scope_value->value.type == AWS_ENDPOINTS_VALUE_OBJECT) {
        if (aws_endpoints_path_through_object(allocator, &scope_value->value, split, &resolved_value)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through object.");
            goto on_error;
        }
    } else if (scope_value->value.type == AWS_ENDPOINTS_VALUE_ARRAY) {
        if (aws_endpoints_path_through_array(allocator, scope, &scope_value->value, split, &resolved_value)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through array.");
            goto on_error;
        }
    } else {
        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Invalid value type for pathing through.");
        goto on_error;
    }

    if (resolved_value.type != AWS_ENDPOINTS_VALUE_STRING) {
        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Templated string didn't resolve to string");
        goto on_error;
    }

    if (resolved_value.v.owning_cursor_string.string != NULL) {
        /* Transfer ownership of the underlying string. */
        *out_owning_cursor = aws_endpoints_owning_cursor_from_string(resolved_value.v.owning_cursor_string.string);
        resolved_value.v.owning_cursor_string.string = NULL;
    } else {
        /* Unlikely to get here since current pathing always returns a new string. */
        *out_owning_cursor = aws_endpoints_non_owning_cursor_create(resolved_value.v.owning_cursor_string.cur);
    }

    aws_endpoints_value_clean_up(&resolved_value);

    return AWS_OP_SUCCESS;

on_error:
    aws_endpoints_value_clean_up(&resolved_value);
    return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
}
+
+static int s_resolve_template(struct aws_byte_cursor template, void *user_data, struct aws_owning_cursor *out_cursor) {
+
+ struct resolve_template_callback_data *data = user_data;
+
+ if (s_resolve_templated_value_with_pathing(data->allocator, data->scope, template, out_cursor)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve template value.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ ;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+******************************
+* Request Context
+******************************
+*/
+
+static void s_endpoints_request_context_destroy(void *data) {
+ if (data == NULL) {
+ return;
+ }
+
+ struct aws_endpoints_request_context *context = data;
+ aws_hash_table_clean_up(&context->values);
+
+ aws_mem_release(context->allocator, context);
+}
+
+struct aws_endpoints_request_context *aws_endpoints_request_context_new(struct aws_allocator *allocator) {
+ AWS_PRECONDITION(allocator);
+
+ struct aws_endpoints_request_context *context =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_request_context));
+
+ context->allocator = allocator;
+ aws_ref_count_init(&context->ref_count, context, s_endpoints_request_context_destroy);
+
+ if (aws_hash_table_init(
+ &context->values,
+ allocator,
+ 0,
+ aws_hash_byte_cursor_ptr,
+ aws_endpoints_byte_cursor_eq,
+ NULL,
+ s_scope_value_destroy_cb)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init request context values.");
+ goto on_error;
+ }
+
+ return context;
+
+on_error:
+ s_endpoints_request_context_destroy(context);
+ return NULL;
+}
+
+struct aws_endpoints_request_context *aws_endpoints_request_context_acquire(
+ struct aws_endpoints_request_context *request_context) {
+ AWS_PRECONDITION(request_context);
+ if (request_context) {
+ aws_ref_count_acquire(&request_context->ref_count);
+ }
+ return request_context;
+}
+
+struct aws_endpoints_request_context *aws_endpoints_request_context_release(
+ struct aws_endpoints_request_context *request_context) {
+ if (request_context) {
+ aws_ref_count_release(&request_context->ref_count);
+ }
+ return NULL;
+}
+
+int aws_endpoints_request_context_add_string(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_request_context *context,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value) {
+ AWS_PRECONDITION(allocator);
+
+ struct aws_endpoints_scope_value *val = aws_endpoints_scope_value_new(allocator, name);
+ val->value.type = AWS_ENDPOINTS_VALUE_STRING;
+ val->value.v.owning_cursor_string = aws_endpoints_owning_cursor_from_cursor(allocator, value);
+
+ if (aws_hash_table_put(&context->values, &val->name.cur, val, NULL)) {
+ aws_endpoints_scope_value_destroy(val);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED);
+ };
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_endpoints_request_context_add_boolean(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_request_context *context,
+ struct aws_byte_cursor name,
+ bool value) {
+ AWS_PRECONDITION(allocator);
+
+ struct aws_endpoints_scope_value *val = aws_endpoints_scope_value_new(allocator, name);
+ val->value.type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ val->value.v.boolean = value;
+
+ if (aws_hash_table_put(&context->values, &val->name.cur, val, NULL)) {
+ aws_endpoints_scope_value_destroy(val);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED);
+ };
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+******************************
+* Rule engine.
+******************************
+*/
+
/* Outcome of rule resolution. `type` selects which member of the union `r` is
 * valid: a resolved endpoint (url, serialized properties, headers) or a
 * resolved error message. Ref-counted; torn down by
 * s_endpoints_resolved_endpoint_destroy. */
struct aws_endpoints_resolved_endpoint {
    struct aws_allocator *allocator;
    struct aws_ref_count ref_count;
    enum aws_endpoints_resolved_endpoint_type type;
    union {
        struct resolved_endpoint {
            struct aws_byte_buf url;
            struct aws_byte_buf properties;
            struct aws_hash_table headers;
        } endpoint;
        struct aws_byte_buf error;
    } r;
};
+
+static void s_endpoints_resolved_endpoint_destroy(void *data) {
+ if (data == NULL) {
+ return;
+ }
+
+ struct aws_endpoints_resolved_endpoint *resolved = data;
+ if (resolved->type == AWS_ENDPOINTS_RESOLVED_ENDPOINT) {
+ aws_byte_buf_clean_up(&resolved->r.endpoint.url);
+ aws_byte_buf_clean_up(&resolved->r.endpoint.properties);
+ aws_hash_table_clean_up(&resolved->r.endpoint.headers);
+ } else if (resolved->type == AWS_ENDPOINTS_RESOLVED_ERROR) {
+ aws_byte_buf_clean_up(&resolved->r.error);
+ }
+ aws_mem_release(resolved->allocator, resolved);
+}
+
+struct aws_endpoints_resolved_endpoint *s_endpoints_resolved_endpoint_new(struct aws_allocator *allocator) {
+ AWS_PRECONDITION(allocator);
+
+ struct aws_endpoints_resolved_endpoint *resolved =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_resolved_endpoint));
+ resolved->allocator = allocator;
+
+ aws_ref_count_init(&resolved->ref_count, resolved, s_endpoints_resolved_endpoint_destroy);
+
+ return resolved;
+}
+
+struct aws_endpoints_resolved_endpoint *aws_endpoints_resolved_endpoint_acquire(
+ struct aws_endpoints_resolved_endpoint *resolved_endpoint) {
+ AWS_PRECONDITION(resolved_endpoint);
+ if (resolved_endpoint) {
+ aws_ref_count_acquire(&resolved_endpoint->ref_count);
+ }
+ return resolved_endpoint;
+}
+
+struct aws_endpoints_resolved_endpoint *aws_endpoints_resolved_endpoint_release(
+ struct aws_endpoints_resolved_endpoint *resolved_endpoint) {
+ if (resolved_endpoint) {
+ aws_ref_count_release(&resolved_endpoint->ref_count);
+ }
+ return NULL;
+}
+
+enum aws_endpoints_resolved_endpoint_type aws_endpoints_resolved_endpoint_get_type(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint) {
+ AWS_PRECONDITION(resolved_endpoint);
+ return resolved_endpoint->type;
+}
+
+int aws_endpoints_resolved_endpoint_get_url(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ struct aws_byte_cursor *out_url) {
+ AWS_PRECONDITION(resolved_endpoint);
+ AWS_PRECONDITION(out_url);
+ if (resolved_endpoint->type != AWS_ENDPOINTS_RESOLVED_ENDPOINT) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *out_url = aws_byte_cursor_from_buf(&resolved_endpoint->r.endpoint.url);
+ return AWS_OP_SUCCESS;
+}
+
+int aws_endpoints_resolved_endpoint_get_properties(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ struct aws_byte_cursor *out_properties) {
+ AWS_PRECONDITION(resolved_endpoint);
+ AWS_PRECONDITION(out_properties);
+ if (resolved_endpoint->type != AWS_ENDPOINTS_RESOLVED_ENDPOINT) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *out_properties = aws_byte_cursor_from_buf(&resolved_endpoint->r.endpoint.properties);
+ return AWS_OP_SUCCESS;
+}
+
+int aws_endpoints_resolved_endpoint_get_headers(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ const struct aws_hash_table **out_headers) {
+ AWS_PRECONDITION(resolved_endpoint);
+ AWS_PRECONDITION(out_headers);
+ if (resolved_endpoint->type != AWS_ENDPOINTS_RESOLVED_ENDPOINT) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *out_headers = &resolved_endpoint->r.endpoint.headers;
+ return AWS_OP_SUCCESS;
+}
+
+int aws_endpoints_resolved_endpoint_get_error(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ struct aws_byte_cursor *out_error) {
+ AWS_PRECONDITION(resolved_endpoint);
+ AWS_PRECONDITION(out_error);
+ if (resolved_endpoint->type != AWS_ENDPOINTS_RESOLVED_ERROR) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *out_error = aws_byte_cursor_from_buf(&resolved_endpoint->r.error);
+ return AWS_OP_SUCCESS;
+}
+
/* Ref-counted rule engine. Holds a reference on the ruleset it evaluates and
 * on an optional partitions config (may be NULL; see engine construction). */
struct aws_endpoints_rule_engine {
    struct aws_allocator *allocator;
    struct aws_ref_count ref_count;

    struct aws_endpoints_ruleset *ruleset;
    struct aws_partitions_config *partitions_config;
};
+
+static void s_endpoints_rule_engine_destroy(void *data) {
+ if (data == NULL) {
+ return;
+ }
+
+ struct aws_endpoints_rule_engine *engine = data;
+ aws_endpoints_ruleset_release(engine->ruleset);
+ aws_partitions_config_release(engine->partitions_config);
+
+ aws_mem_release(engine->allocator, engine);
+}
+
+struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_new(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_ruleset *ruleset,
+ struct aws_partitions_config *partitions_config) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(ruleset);
+
+ struct aws_endpoints_rule_engine *engine = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_rule_engine));
+ engine->allocator = allocator;
+ engine->ruleset = ruleset;
+ engine->partitions_config = partitions_config;
+
+ aws_endpoints_ruleset_acquire(ruleset);
+ aws_partitions_config_acquire(partitions_config);
+ aws_ref_count_init(&engine->ref_count, engine, s_endpoints_rule_engine_destroy);
+
+ return engine;
+}
+
+struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_acquire(struct aws_endpoints_rule_engine *rule_engine) {
+ AWS_PRECONDITION(rule_engine);
+ if (rule_engine) {
+ aws_ref_count_acquire(&rule_engine->ref_count);
+ }
+ return rule_engine;
+}
+
+struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_release(struct aws_endpoints_rule_engine *rule_engine) {
+ if (rule_engine) {
+ aws_ref_count_release(&rule_engine->ref_count);
+ }
+ return NULL;
+}
+
+int s_revert_scope(struct aws_endpoints_resolution_scope *scope) {
+
+ for (size_t idx = 0; idx < aws_array_list_length(&scope->added_keys); ++idx) {
+ struct aws_byte_cursor *cur = NULL;
+ if (aws_array_list_get_at_ptr(&scope->added_keys, (void **)&cur, idx)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to retrieve value.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ }
+
+ aws_hash_table_remove(&scope->values, cur, NULL, NULL);
+ }
+
+ aws_array_list_clear(&scope->added_keys);
+
+ return AWS_OP_SUCCESS;
+}
+
/* Array-list element destructor: frees the aws_string pointer stored at the slot. */
static void s_on_string_array_element_destroy(void *element) {
    struct aws_string **str_ptr = element;
    aws_string_destroy(*str_ptr);
}
+
+static void s_callback_headers_destroy(void *data) {
+ struct aws_array_list *array = data;
+ struct aws_allocator *alloc = array->alloc;
+ aws_array_list_deep_clean_up(array, s_on_string_array_element_destroy);
+ aws_mem_release(alloc, array);
+}
+
+static int s_resolve_headers(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_hash_table *headers,
+ struct aws_hash_table *out_headers) {
+
+ struct aws_endpoints_value value;
+ struct aws_array_list *resolved_headers = NULL;
+
+ if (aws_hash_table_init(
+ out_headers,
+ allocator,
+ aws_hash_table_get_entry_count(headers),
+ aws_hash_string,
+ aws_hash_callback_string_eq,
+ aws_hash_callback_string_destroy,
+ s_callback_headers_destroy)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init table for resolved headers");
+ goto on_error;
+ }
+
+ for (struct aws_hash_iter iter = aws_hash_iter_begin(headers); !aws_hash_iter_done(&iter);
+ aws_hash_iter_next(&iter)) {
+
+ struct aws_string *key = (struct aws_string *)iter.element.key;
+ struct aws_array_list *header_list = (struct aws_array_list *)iter.element.value;
+
+ resolved_headers = aws_mem_calloc(allocator, 1, sizeof(struct aws_array_list));
+ aws_array_list_init_dynamic(
+ resolved_headers, allocator, aws_array_list_length(header_list), sizeof(struct aws_string *));
+
+ for (size_t i = 0; i < aws_array_list_length(header_list); ++i) {
+ struct aws_endpoints_expr *expr = NULL;
+ if (aws_array_list_get_at_ptr(header_list, (void **)&expr, i)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to get header.");
+ goto on_error;
+ }
+
+ if (s_resolve_expr(allocator, expr, scope, &value) || value.type != AWS_ENDPOINTS_VALUE_STRING) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve header expr.");
+ goto on_error;
+ }
+
+ struct aws_string *str = aws_string_new_from_cursor(allocator, &value.v.owning_cursor_string.cur);
+ if (aws_array_list_push_back(resolved_headers, &str)) {
+ aws_string_destroy(str);
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add resolved header to result.");
+ goto on_error;
+ }
+
+ aws_endpoints_value_clean_up(&value);
+ }
+
+ if (aws_hash_table_put(out_headers, aws_string_clone_or_reuse(allocator, key), resolved_headers, NULL)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add resolved header to result.");
+ goto on_error;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_value_clean_up(&value);
+ if (resolved_headers != NULL) {
+ s_callback_headers_destroy(resolved_headers);
+ }
+ aws_hash_table_clean_up(out_headers);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
/*
 * Resolves an endpoint for the given request context by walking the ruleset's
 * rules in order. For each rule: evaluate its conditions; if they hold, act
 * on the rule type (endpoint -> build a resolved endpoint, error -> build a
 * resolved error, tree -> descend into the nested rules); otherwise revert
 * any scope keys the conditions assigned and move to the next sibling rule.
 * On success, *out_resolved_endpoint is set and AWS_OP_SUCCESS is returned.
 * Raises AWS_ERROR_SDKUTILS_ENDPOINTS_EMPTY_RULESET, _RULESET_EXHAUSTED, or
 * _RESOLVE_FAILED on failure.
 */
int aws_endpoints_rule_engine_resolve(
    struct aws_endpoints_rule_engine *engine,
    const struct aws_endpoints_request_context *context,
    struct aws_endpoints_resolved_endpoint **out_resolved_endpoint) {

    if (aws_array_list_length(&engine->ruleset->rules) == 0) {
        return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_EMPTY_RULESET);
    }

    int result = AWS_OP_SUCCESS;
    /* Scope holds the values visible to the rules currently being evaluated,
     * seeded from the request context, ruleset, and partitions config. */
    struct aws_endpoints_resolution_scope scope;
    if (s_init_top_level_scope(engine->allocator, context, engine->ruleset, engine->partitions_config, &scope)) {
        result = AWS_OP_ERR;
        /* NOTE(review): this path still runs s_scope_clean_up on a scope whose
         * init just failed — confirm s_init_top_level_scope leaves the scope
         * safe to clean up on failure. */
        goto on_done;
    }

    while (scope.rule_idx < aws_array_list_length(scope.rules)) {
        struct aws_endpoints_rule *rule = NULL;
        if (aws_array_list_get_at_ptr(scope.rules, (void **)&rule, scope.rule_idx)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to get rule.");
            result = AWS_OP_ERR;
            goto on_done;
        }

        bool is_truthy = false;
        if (s_resolve_conditions(engine->allocator, &rule->conditions, &scope, &is_truthy)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve conditions.");
            result = AWS_OP_ERR;
            goto on_done;
        }

        if (!is_truthy) {
            /* Conditions did not match: drop any keys they assigned into the
             * scope and try the next sibling rule. */
            s_revert_scope(&scope);
            ++scope.rule_idx;
            continue;
        }

        switch (rule->type) {
            case AWS_ENDPOINTS_RULE_ENDPOINT: {
                struct aws_endpoints_resolved_endpoint *endpoint = s_endpoints_resolved_endpoint_new(engine->allocator);
                endpoint->type = AWS_ENDPOINTS_RESOLVED_ENDPOINT;

                /* NOTE(review): on the failure paths below, `endpoint` (and
                 * `val` in the url branch) appear to be leaked before goto
                 * on_done — confirm whether a release/clean_up is needed. */
                struct aws_endpoints_value val;
                if (s_resolve_expr(engine->allocator, &rule->rule_data.endpoint.url, &scope, &val) ||
                    val.type != AWS_ENDPOINTS_VALUE_STRING ||
                    aws_byte_buf_init_copy_from_cursor(
                        &endpoint->r.endpoint.url, engine->allocator, val.v.owning_cursor_string.cur)) {
                    AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated url.");
                    result = AWS_OP_ERR;
                    goto on_done;
                }

                aws_endpoints_value_clean_up(&val);

                struct resolve_template_callback_data data = {.allocator = engine->allocator, .scope = &scope};

                /* Properties are kept as JSON text and template-expanded here. */
                if (rule->rule_data.endpoint.properties.len > 0 &&
                    aws_byte_buf_init_from_resolved_templated_string(
                        engine->allocator,
                        &endpoint->r.endpoint.properties,
                        aws_byte_cursor_from_buf(&rule->rule_data.endpoint.properties),
                        s_resolve_template,
                        &data,
                        true)) {
                    AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated properties.");
                    result = AWS_OP_ERR;
                    goto on_done;
                }

                if (s_resolve_headers(
                        engine->allocator, &scope, &rule->rule_data.endpoint.headers, &endpoint->r.endpoint.headers)) {
                    AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated headers.");
                    result = AWS_OP_ERR;
                    goto on_done;
                }

                /* Ownership of endpoint transfers to the caller. */
                *out_resolved_endpoint = endpoint;
                goto on_done;
            }
            case AWS_ENDPOINTS_RULE_ERROR: {
                struct aws_endpoints_resolved_endpoint *error = s_endpoints_resolved_endpoint_new(engine->allocator);
                error->type = AWS_ENDPOINTS_RESOLVED_ERROR;

                struct aws_endpoints_value val;
                if (s_resolve_expr(engine->allocator, &rule->rule_data.error.error, &scope, &val) ||
                    val.type != AWS_ENDPOINTS_VALUE_STRING ||
                    aws_byte_buf_init_copy_from_cursor(
                        &error->r.error, engine->allocator, val.v.owning_cursor_string.cur)) {
                    /* NOTE(review): if s_resolve_expr fails outright, val may
                     * be cleaned up without having been initialized by it —
                     * confirm s_resolve_expr always initializes val. */
                    aws_endpoints_value_clean_up(&val);
                    AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated url.");
                    result = AWS_OP_ERR;
                    goto on_done;
                }

                aws_endpoints_value_clean_up(&val);
                *out_resolved_endpoint = error;
                goto on_done;
            }
            case AWS_ENDPOINTS_RULE_TREE: {
                /* jumping down a level */
                aws_array_list_clear(&scope.added_keys);
                scope.rule_idx = 0;
                scope.rules = &rule->rule_data.tree.rules;
                continue;
            }
            default: {
                AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Unexpected rule type.");
                result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
                goto on_done;
            }
        }
    }

    AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "All rules have been exhausted.");
    result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RULESET_EXHAUSTED);

on_done:
    AWS_LOGF_DEBUG(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Resolved endpoint with status %d", result);
    s_scope_clean_up(&scope);
    return result;
}
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
new file mode 100644
index 00000000000..99f31a50629
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
@@ -0,0 +1,958 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/array_list.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/json.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/string.h>
+#include <aws/sdkutils/private/endpoints_types_impl.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+
/* parameter types */
static struct aws_byte_cursor s_string_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("string");
static struct aws_byte_cursor s_boolean_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("boolean");

/* rule types */
static struct aws_byte_cursor s_endpoint_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("endpoint");
static struct aws_byte_cursor s_error_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("error");
static struct aws_byte_cursor s_tree_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("tree");

/* ruleset schema version this parser understands */
static struct aws_byte_cursor s_supported_version = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("1.0");

/* used by getters that have no string value to report */
static struct aws_byte_cursor s_empty_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("");

/* TODO: improve error messages. Include json line num? or dump json node? */

/* Returns the ruleset document version supported by this implementation. */
struct aws_byte_cursor aws_endpoints_get_supported_ruleset_version(void) {
    return s_supported_version;
}
+
/*
******************************
* Parameter Getters.
******************************
*/

/* Returns the declared type (string or boolean) of the parameter. */
enum aws_endpoints_parameter_type aws_endpoints_parameter_get_type(const struct aws_endpoints_parameter *parameter) {
    AWS_PRECONDITION(parameter);
    return parameter->type;
}

/* Returns the parameter's "builtIn" binding as parsed from the ruleset. */
struct aws_byte_cursor aws_endpoints_parameter_get_built_in(const struct aws_endpoints_parameter *parameter) {
    AWS_PRECONDITION(parameter);
    return parameter->built_in;
}

/* Copies the parameter's default string value into out_cursor.
 * Returns AWS_OP_SUCCESS for string-typed parameters; otherwise sets
 * out_cursor to an empty cursor and raises AWS_ERROR_INVALID_ARGUMENT.
 * Note: does not consult has_default_value — callers see whatever
 * default_value.string currently holds. */
int aws_endpoints_parameter_get_default_string(
    const struct aws_endpoints_parameter *parameter,
    struct aws_byte_cursor *out_cursor) {
    AWS_PRECONDITION(parameter);
    AWS_PRECONDITION(out_cursor);

    if (parameter->type == AWS_ENDPOINTS_PARAMETER_STRING) {
        *out_cursor = parameter->default_value.string;
        return AWS_OP_SUCCESS;
    };

    *out_cursor = s_empty_cursor;
    return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}

/* Exposes the parameter's default boolean by pointer.
 * Returns AWS_OP_SUCCESS for boolean-typed parameters; otherwise sets
 * *out_bool to NULL and raises AWS_ERROR_INVALID_ARGUMENT. */
int aws_endpoints_parameter_get_default_boolean(
    const struct aws_endpoints_parameter *parameter,
    const bool **out_bool) {
    AWS_PRECONDITION(parameter);
    AWS_PRECONDITION(out_bool);

    if (parameter->type == AWS_ENDPOINTS_PARAMETER_BOOLEAN) {
        *out_bool = &parameter->default_value.boolean;
        return AWS_OP_SUCCESS;
    };

    *out_bool = NULL;
    return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}

/* True if the parameter was declared "required" in the ruleset. */
bool aws_endpoints_parameters_get_is_required(const struct aws_endpoints_parameter *parameter) {
    AWS_PRECONDITION(parameter);
    return parameter->is_required;
}

/* Returns the parameter's "documentation" text. */
struct aws_byte_cursor aws_endpoints_parameter_get_documentation(const struct aws_endpoints_parameter *parameter) {
    AWS_PRECONDITION(parameter);
    return parameter->is_deprecated ? parameter->documentation : parameter->documentation;
}

/* True if the parameter carries a "deprecated" annotation. */
bool aws_endpoints_parameters_get_is_deprecated(const struct aws_endpoints_parameter *parameter) {
    AWS_PRECONDITION(parameter);
    return parameter->is_deprecated;
}

/* Returns the "message" string of the parameter's "deprecated" object. */
struct aws_byte_cursor aws_endpoints_parameter_get_deprecated_message(const struct aws_endpoints_parameter *parameter) {
    AWS_PRECONDITION(parameter);
    return parameter->deprecated_message;
}

/* Returns the "since" string of the parameter's "deprecated" object. */
struct aws_byte_cursor aws_endpoints_parameter_get_deprecated_since(const struct aws_endpoints_parameter *parameter) {
    AWS_PRECONDITION(parameter);
    return parameter->deprecated_since;
}
+
/*
******************************
* Parser getters.
******************************
*/

/* Returns the ruleset's parameter table (aws_byte_cursor name key ->
 * aws_endpoints_parameter *); the table remains owned by the ruleset. */
const struct aws_hash_table *aws_endpoints_ruleset_get_parameters(struct aws_endpoints_ruleset *ruleset) {
    AWS_PRECONDITION(ruleset);
    return &ruleset->parameters;
}

/* Returns the ruleset's version string. */
struct aws_byte_cursor aws_endpoints_ruleset_get_version(const struct aws_endpoints_ruleset *ruleset) {
    AWS_PRECONDITION(ruleset);
    return ruleset->version;
}

/* Returns the ruleset's service id string. */
struct aws_byte_cursor aws_endpoints_ruleset_get_service_id(const struct aws_endpoints_ruleset *ruleset) {
    AWS_PRECONDITION(ruleset);
    return ruleset->service_id;
}
+
/*
******************************
* Parser helpers.
******************************
*/

/* Array-list element clean-up callback: tears down one aws_endpoints_rule. */
static void s_on_rule_array_element_clean_up(void *element) {
    struct aws_endpoints_rule *rule = element;
    aws_endpoints_rule_clean_up(rule);
}

/* Array-list element clean-up callback: tears down one aws_endpoints_expr. */
static void s_on_expr_element_clean_up(void *data) {
    struct aws_endpoints_expr *expr = data;
    aws_endpoints_expr_clean_up(expr);
}

/* Hash-table value destructor for parameter tables. */
static void s_callback_endpoints_parameter_destroy(void *data) {
    struct aws_endpoints_parameter *parameter = data;
    aws_endpoints_parameter_destroy(parameter);
}

/* Hash-table value destructor for header expression lists: cleans up each
 * aws_endpoints_expr element, the list, and the list's heap allocation. */
static void s_callback_headers_destroy(void *data) {
    struct aws_array_list *array = data;
    struct aws_allocator *alloc = array->alloc;
    aws_array_list_deep_clean_up(array, s_on_expr_element_clean_up);
    aws_array_list_clean_up(array);
    aws_mem_release(alloc, array);
}

/* User-data bundle threaded through aws_json_const_iterate_array callbacks. */
struct array_parser_wrapper {
    struct aws_allocator *allocator;
    struct aws_array_list *array;
};
+
+static int s_init_array_from_json(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *value_node,
+ struct aws_array_list *values,
+ aws_json_on_value_encountered_const_fn *value_fn) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(values);
+ AWS_PRECONDITION(value_node);
+ AWS_PRECONDITION(value_fn);
+
+ struct array_parser_wrapper wrapper = {
+ .allocator = allocator,
+ .array = values,
+ };
+
+ if (aws_json_const_iterate_array(value_node, value_fn, &wrapper)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to iterate through array.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+struct member_parser_wrapper {
+ struct aws_allocator *allocator;
+ struct aws_hash_table *table;
+};
+
+static int s_init_members_from_json(
+ struct aws_allocator *allocator,
+ struct aws_json_value *node,
+ struct aws_hash_table *table,
+ aws_json_on_member_encountered_const_fn *member_fn) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(node);
+ AWS_PRECONDITION(table);
+
+ struct member_parser_wrapper wrapper = {
+ .allocator = allocator,
+ .table = table,
+ };
+
+ if (aws_json_const_iterate_object(node, member_fn, &wrapper)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to iterate through member fields.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+******************************
+* Parser functions.
+******************************
+*/
+
+static int s_parse_function(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *node,
+ struct aws_endpoints_function *function);
+
+/*
+ * Note: this function only fails in cases where node is a ref (ie object with a
+ * ref field), but cannot be parsed completely.
+ */
+static int s_try_parse_reference(const struct aws_json_value *node, struct aws_byte_cursor *out_reference) {
+ AWS_PRECONDITION(node);
+
+ AWS_ZERO_STRUCT(*out_reference);
+
+ struct aws_json_value *ref_node = aws_json_value_get_from_object(node, aws_byte_cursor_from_c_str("ref"));
+ if (ref_node != NULL && aws_json_value_get_string(ref_node, out_reference)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse ref.");
+ AWS_ZERO_STRUCT(*out_reference);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_parse_expr(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *node,
+ struct aws_endpoints_expr *expr);
+
+static int s_on_expr_element(
+ size_t idx,
+ const struct aws_json_value *value_node,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)idx;
+ (void)out_should_continue;
+ AWS_PRECONDITION(value_node);
+ AWS_PRECONDITION(user_data);
+
+ struct array_parser_wrapper *wrapper = user_data;
+
+ struct aws_endpoints_expr expr;
+ if (s_parse_expr(wrapper->allocator, value_node, &expr)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse expr.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ aws_array_list_push_back(wrapper->array, &expr);
+
+ return AWS_OP_SUCCESS;
+}
+
/*
 * Parses a JSON node into an aws_endpoints_expr. A node may be a literal
 * string/number/boolean, an array of sub-expressions, a {"ref": ...}
 * parameter reference, or a function object. On failure the partially-built
 * expr is cleaned up and AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED is raised.
 */
static int s_parse_expr(
    struct aws_allocator *allocator,
    const struct aws_json_value *node,
    struct aws_endpoints_expr *expr) {
    AWS_PRECONDITION(allocator);
    AWS_PRECONDITION(node);
    AWS_PRECONDITION(expr);

    AWS_ZERO_STRUCT(*expr);

    /* TODO: this recurses. in practical circumstances depth will never be high,
    but we should still consider doing iterative approach */
    /* Literals first; each branch both checks the type and extracts into the
     * union member for that type. */
    if (aws_json_value_is_string(node) && !aws_json_value_get_string(node, &expr->e.string)) {
        expr->type = AWS_ENDPOINTS_EXPR_STRING;
        return AWS_OP_SUCCESS;
    } else if (aws_json_value_is_number(node) && !aws_json_value_get_number(node, &expr->e.number)) {
        expr->type = AWS_ENDPOINTS_EXPR_NUMBER;
        return AWS_OP_SUCCESS;
    } else if (aws_json_value_is_boolean(node) && !aws_json_value_get_boolean(node, &expr->e.boolean)) {
        expr->type = AWS_ENDPOINTS_EXPR_BOOLEAN;
        return AWS_OP_SUCCESS;
    } else if (aws_json_value_is_array(node)) {
        /* Arrays recurse element-by-element via s_on_expr_element. */
        expr->type = AWS_ENDPOINTS_EXPR_ARRAY;
        size_t num_elements = aws_json_get_array_size(node);
        aws_array_list_init_dynamic(&expr->e.array, allocator, num_elements, sizeof(struct aws_endpoints_expr));
        if (s_init_array_from_json(allocator, node, &expr->e.array, s_on_expr_element)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse array value type.");
            goto on_error;
        }
        return AWS_OP_SUCCESS;
    }

    /* Not a literal: try {"ref": ...}; s_try_parse_reference only fails when
     * a ref field exists but is malformed. */
    struct aws_byte_cursor reference;
    if (s_try_parse_reference(node, &reference)) {
        goto on_error;
    }

    if (reference.len > 0) {
        expr->type = AWS_ENDPOINTS_EXPR_REFERENCE;
        expr->e.reference = reference;
        return AWS_OP_SUCCESS;
    }

    /* Last resort: must be a function object. */
    expr->type = AWS_ENDPOINTS_EXPR_FUNCTION;
    if (s_parse_function(allocator, node, &expr->e.function)) {
        goto on_error;
    }

    return AWS_OP_SUCCESS;

on_error:
    aws_endpoints_expr_clean_up(expr);
    AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse expr type");
    return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
}
+
/*
 * Parses a function node ({"fn": <name>, "argv": [...]}) into an
 * aws_endpoints_function. The function name is mapped to an enum value by
 * comparing its hash against the precomputed aws_endpoints_fn_name_hash
 * table. On failure the partially-built function is cleaned up and
 * AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED is raised.
 */
static int s_parse_function(
    struct aws_allocator *allocator,
    const struct aws_json_value *node,
    struct aws_endpoints_function *function) {
    AWS_PRECONDITION(allocator);
    AWS_PRECONDITION(node);

    AWS_ZERO_STRUCT(*function);

    struct aws_json_value *fn_node = aws_json_value_get_from_object(node, aws_byte_cursor_from_c_str("fn"));
    if (fn_node == NULL) {
        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Node is not a function.");
        goto on_error;
    }

    struct aws_byte_cursor fn_cur;
    if (aws_json_value_get_string(fn_node, &fn_cur)) {
        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract fn name.");
        goto on_error;
    }

    /* AWS_ENDPOINTS_FN_LAST doubles as the "not found" sentinel. The lookup
     * is by hash only — assumes no collisions among known function names. */
    function->fn = AWS_ENDPOINTS_FN_LAST;
    uint64_t hash = aws_hash_byte_cursor_ptr(&fn_cur);
    for (int idx = AWS_ENDPOINTS_FN_FIRST; idx < AWS_ENDPOINTS_FN_LAST; ++idx) {
        if (aws_endpoints_fn_name_hash[idx] == hash) {
            function->fn = idx;
            break;
        }
    }

    if (function->fn == AWS_ENDPOINTS_FN_LAST) {
        AWS_LOGF_ERROR(
            AWS_LS_SDKUTILS_ENDPOINTS_PARSING,
            "Could not map function name to function type: " PRInSTR,
            AWS_BYTE_CURSOR_PRI(fn_cur));
        goto on_error;
    }

    /* "argv" is mandatory and must be an array of expressions. */
    struct aws_json_value *argv_node = aws_json_value_get_from_object(node, aws_byte_cursor_from_c_str("argv"));
    if (argv_node == NULL || !aws_json_value_is_array(argv_node)) {
        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "No argv or unexpected type.");
        goto on_error;
    }

    size_t num_args = aws_json_get_array_size(argv_node);
    aws_array_list_init_dynamic(&function->argv, allocator, num_args, sizeof(struct aws_endpoints_expr));

    if (s_init_array_from_json(allocator, argv_node, &function->argv, s_on_expr_element)) {
        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse argv.");
        goto on_error;
    }

    return AWS_OP_SUCCESS;

on_error:
    aws_endpoints_function_clean_up(function);
    return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
}
+
/*
 * Object-iteration callback that parses one entry of the ruleset
 * "parameters" object into an aws_endpoints_parameter and stores it in the
 * wrapper's table, keyed by the parameter's name.
 * Raises AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED on malformed input,
 * destroying the partially-built parameter.
 */
static int s_on_parameter_key(
    const struct aws_byte_cursor *key,
    const struct aws_json_value *value,
    bool *out_should_continue,
    void *user_data) {
    (void)out_should_continue;
    AWS_PRECONDITION(key);
    AWS_PRECONDITION(value);
    AWS_PRECONDITION(user_data);

    struct member_parser_wrapper *wrapper = user_data;

    struct aws_endpoints_parameter *parameter = aws_endpoints_parameter_new(wrapper->allocator, *key);

    /* required fields */
    struct aws_byte_cursor type_cur;
    struct aws_json_value *type_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("type"));
    if (type_node == NULL || aws_json_value_get_string(type_node, &type_cur)) {
        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract parameter type.");
        goto on_error;
    }

    enum aws_endpoints_parameter_type type;
    if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_string_type_cur)) {
        type = AWS_ENDPOINTS_PARAMETER_STRING;
    } else if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_boolean_type_cur)) {
        type = AWS_ENDPOINTS_PARAMETER_BOOLEAN;
    } else {
        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for parameter.");
        goto on_error;
    }

    parameter->type = type;

    struct aws_json_value *documentation_node =
        aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("documentation"));

    /* TODO: spec calls for documentation to be required, but several test-cases
    are missing docs on parameters */
    if (documentation_node != NULL) {
        if (aws_json_value_get_string(documentation_node, &parameter->documentation)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract parameter documentation.");
            goto on_error;
        }
    }

    /* optional fields */
    struct aws_json_value *built_in_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("builtIn"));
    if (built_in_node != NULL) {
        if (aws_json_value_get_string(built_in_node, &parameter->built_in)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for built-in parameter field.");
            goto on_error;
        }
    }

    struct aws_json_value *required_node =
        aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("required"));
    if (required_node != NULL) {
        if (!aws_json_value_is_boolean(required_node)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for required parameter field.");
            goto on_error;
        }
        aws_json_value_get_boolean(required_node, &parameter->is_required);
    }

    /* "default" must match the declared parameter type. */
    struct aws_json_value *default_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("default"));
    parameter->has_default_value = default_node != NULL;
    if (default_node != NULL) {
        if (type == AWS_ENDPOINTS_PARAMETER_STRING &&
            aws_json_value_get_string(default_node, &parameter->default_value.string)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for default parameter value.");
            goto on_error;
        } else if (
            type == AWS_ENDPOINTS_PARAMETER_BOOLEAN &&
            aws_json_value_get_boolean(default_node, &parameter->default_value.boolean)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for default parameter value.");
            goto on_error;
        }
    }

    /* "deprecated" is an object with optional "message" and "since" strings. */
    struct aws_json_value *deprecated_node =
        aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("deprecated"));
    if (deprecated_node != NULL) {
        struct aws_json_value *deprecated_message_node =
            aws_json_value_get_from_object(deprecated_node, aws_byte_cursor_from_c_str("message"));
        if (deprecated_message_node != NULL &&
            aws_json_value_get_string(deprecated_message_node, &parameter->deprecated_message)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected value for deprecated message.");
            goto on_error;
        }

        struct aws_json_value *deprecated_since_node =
            aws_json_value_get_from_object(deprecated_node, aws_byte_cursor_from_c_str("since"));
        if (deprecated_since_node != NULL &&
            aws_json_value_get_string(deprecated_since_node, &parameter->deprecated_since)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected value for deprecated since.");
            goto on_error;
        }
    }

    /* Table takes ownership of parameter on success. */
    if (aws_hash_table_put(wrapper->table, &parameter->name, parameter, NULL)) {
        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to add parameter.");
        goto on_error;
    }
    return AWS_OP_SUCCESS;

on_error:
    aws_endpoints_parameter_destroy(parameter);
    return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
}
+
+static int s_on_condition_element(
+ size_t idx,
+ const struct aws_json_value *condition_node,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)idx;
+ (void)out_should_continue;
+ AWS_PRECONDITION(condition_node);
+ AWS_PRECONDITION(user_data);
+
+ struct array_parser_wrapper *wrapper = user_data;
+
+ struct aws_endpoints_condition condition;
+ AWS_ZERO_STRUCT(condition);
+
+ condition.expr.type = AWS_ENDPOINTS_EXPR_FUNCTION;
+ if (s_parse_function(wrapper->allocator, condition_node, &condition.expr.e.function)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse function.");
+ goto on_error;
+ }
+
+ struct aws_json_value *assign_node =
+ aws_json_value_get_from_object(condition_node, aws_byte_cursor_from_c_str("assign"));
+ if (assign_node != NULL && aws_json_value_get_string(assign_node, &condition.assign)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected value for assign.");
+ goto on_error;
+ }
+
+ aws_array_list_push_back(wrapper->array, &condition);
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_condition_clean_up(&condition);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+}
+
+static int s_on_header_element(
+ size_t idx,
+ const struct aws_json_value *value,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)idx;
+ (void)out_should_continue;
+ AWS_PRECONDITION(value);
+ AWS_PRECONDITION(user_data);
+ struct array_parser_wrapper *wrapper = user_data;
+
+ struct aws_endpoints_expr expr;
+ if (s_parse_expr(wrapper->allocator, value, &expr)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected format for header element.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ aws_array_list_push_back(wrapper->array, &expr);
+ return AWS_OP_SUCCESS;
+}
+
+static int s_on_headers_key(
+ const struct aws_byte_cursor *key,
+ const struct aws_json_value *value,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)out_should_continue;
+ AWS_PRECONDITION(key);
+ AWS_PRECONDITION(value);
+ AWS_PRECONDITION(user_data);
+ struct member_parser_wrapper *wrapper = user_data;
+
+ if (!aws_json_value_is_array(value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected format for header value.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ size_t num_elements = aws_json_get_array_size(value);
+ struct aws_array_list *headers = aws_mem_calloc(wrapper->allocator, 1, sizeof(struct aws_array_list));
+ aws_array_list_init_dynamic(headers, wrapper->allocator, num_elements, sizeof(struct aws_endpoints_expr));
+ if (s_init_array_from_json(wrapper->allocator, value, headers, s_on_header_element)) {
+ goto on_error;
+ }
+
+ aws_hash_table_put(wrapper->table, aws_string_new_from_cursor(wrapper->allocator, key), headers, NULL);
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ if (headers) {
+ s_callback_headers_destroy(headers);
+ }
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+}
+
/*
 * Parses an endpoint rule's "endpoint" object: the "url" expression (string
 * literal, reference, or function), the optional "properties" object (stored
 * as JSON text for later template expansion), and the optional "headers"
 * object (string key -> array of expressions).
 * On failure data_rule is cleaned up and
 * AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED is raised.
 */
static int s_parse_endpoints_rule_data_endpoint(
    struct aws_allocator *allocator,
    const struct aws_json_value *rule_node,
    struct aws_endpoints_rule_data_endpoint *data_rule) {
    AWS_PRECONDITION(allocator);
    AWS_PRECONDITION(rule_node);
    AWS_PRECONDITION(data_rule);

    data_rule->allocator = allocator;
    struct aws_json_value *url_node = aws_json_value_get_from_object(rule_node, aws_byte_cursor_from_c_str("url"));
    /* NOTE(review): when url_node is NULL this branch is still taken and
     * passes NULL to aws_json_value_get_string — confirm the json wrapper
     * tolerates a NULL node. */
    if (url_node == NULL || aws_json_value_is_string(url_node)) {
        data_rule->url.type = AWS_ENDPOINTS_EXPR_STRING;
        aws_json_value_get_string(url_node, &data_rule->url.e.string);
    } else {
        struct aws_byte_cursor reference;
        if (s_try_parse_reference(url_node, &reference)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse reference.");
            goto on_error;
        }

        if (reference.len > 0) {
            data_rule->url.type = AWS_ENDPOINTS_EXPR_REFERENCE;
            data_rule->url.e.reference = reference;
        } else {
            data_rule->url.type = AWS_ENDPOINTS_EXPR_FUNCTION;
            if (s_parse_function(allocator, url_node, &data_rule->url.e.function)) {
                AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to function.");
                goto on_error;
            }
        }
    }

    /* Properties are kept verbatim as their JSON text; the rule engine
     * template-expands them at resolution time. */
    struct aws_json_value *properties_node =
        aws_json_value_get_from_object(rule_node, aws_byte_cursor_from_c_str("properties"));
    if (properties_node != NULL) {
        aws_byte_buf_init(&data_rule->properties, allocator, 0);

        if (aws_byte_buf_append_json_string(properties_node, &data_rule->properties)) {
            aws_byte_buf_clean_up(&data_rule->properties);
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract properties.");
            goto on_error;
        }
    }

    /* TODO: this is currently aws_string* to aws_array_list*
     * We cannot use same trick as for params to use aws_byte_cursor as key,
     * since value is a generic type. We can wrap list into a struct, but
     * seems ugly. Anything cleaner?
     */
    /* 20 is an initial size hint for the table, not a hard limit. */
    aws_hash_table_init(
        &data_rule->headers,
        allocator,
        20,
        aws_hash_string,
        aws_hash_callback_string_eq,
        aws_hash_callback_string_destroy,
        s_callback_headers_destroy);

    struct aws_json_value *headers_node =
        aws_json_value_get_from_object(rule_node, aws_byte_cursor_from_c_str("headers"));
    if (headers_node != NULL) {

        if (s_init_members_from_json(allocator, headers_node, &data_rule->headers, s_on_headers_key)) {
            AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract parameters.");
            goto on_error;
        }
    }

    return AWS_OP_SUCCESS;

on_error:
    aws_endpoints_rule_data_endpoint_clean_up(data_rule);
    return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
}
+
+static int s_parse_endpoints_rule_data_error(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *error_node,
+ struct aws_endpoints_rule_data_error *data_rule) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(error_node);
+ AWS_PRECONDITION(data_rule);
+
+ if (aws_json_value_is_string(error_node)) {
+ data_rule->error.type = AWS_ENDPOINTS_EXPR_STRING;
+ aws_json_value_get_string(error_node, &data_rule->error.e.string);
+
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_byte_cursor reference;
+ if (s_try_parse_reference(error_node, &reference)) {
+ goto on_error;
+ }
+
+ if (reference.len > 0) {
+ data_rule->error.type = AWS_ENDPOINTS_EXPR_REFERENCE;
+ data_rule->error.e.reference = reference;
+ return AWS_OP_SUCCESS;
+ }
+
+ data_rule->error.type = AWS_ENDPOINTS_EXPR_FUNCTION;
+ if (s_parse_function(allocator, error_node, &data_rule->error.e.function)) {
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_rule_data_error_clean_up(data_rule);
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse error rule.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+}
+
+static int s_on_rule_element(
+ size_t idx,
+ const struct aws_json_value *value,
+ bool *out_should_continue,
+ void *user_data);
+
+static int s_parse_endpoints_rule_data_tree(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *rule_node,
+ struct aws_endpoints_rule_data_tree *rule_data) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(rule_node);
+ AWS_PRECONDITION(rule_data);
+
+ struct aws_json_value *rules_node = aws_json_value_get_from_object(rule_node, aws_byte_cursor_from_c_str("rules"));
+ if (rules_node == NULL || !aws_json_value_is_array(rules_node)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Rules node is missing or unexpected type.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ size_t num_rules = aws_json_get_array_size(rules_node);
+ aws_array_list_init_dynamic(&rule_data->rules, allocator, num_rules, sizeof(struct aws_endpoints_rule));
+ if (s_init_array_from_json(allocator, rules_node, &rule_data->rules, s_on_rule_element)) {
+ aws_endpoints_rule_data_tree_clean_up(rule_data);
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse rules.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_on_rule_element(
+ size_t idx,
+ const struct aws_json_value *value,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)idx;
+ (void)out_should_continue;
+ AWS_PRECONDITION(value);
+ AWS_PRECONDITION(user_data);
+
+ struct array_parser_wrapper *wrapper = user_data;
+
+ /* Required fields */
+ struct aws_byte_cursor type_cur;
+ struct aws_json_value *type_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("type"));
+ if (type_node == NULL || aws_json_value_get_string(type_node, &type_cur)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract rule type.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ enum aws_endpoints_rule_type type;
+ if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_endpoint_type_cur)) {
+ type = AWS_ENDPOINTS_RULE_ENDPOINT;
+ } else if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_error_type_cur)) {
+ type = AWS_ENDPOINTS_RULE_ERROR;
+ } else if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_tree_type_cur)) {
+ type = AWS_ENDPOINTS_RULE_TREE;
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected rule type.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ struct aws_endpoints_rule rule;
+ AWS_ZERO_STRUCT(rule);
+ rule.type = type;
+
+ struct aws_json_value *conditions_node =
+ aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("conditions"));
+ if (conditions_node == NULL || !aws_json_value_is_array(conditions_node)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Conditions node missing.");
+ goto on_error;
+ }
+
+ size_t num_conditions = aws_json_get_array_size(conditions_node);
+ aws_array_list_init_dynamic(
+ &rule.conditions, wrapper->allocator, num_conditions, sizeof(struct aws_endpoints_condition));
+
+ if (s_init_array_from_json(wrapper->allocator, conditions_node, &rule.conditions, s_on_condition_element)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract conditions.");
+ goto on_error;
+ }
+
+ switch (type) {
+ case AWS_ENDPOINTS_RULE_ENDPOINT: {
+ struct aws_json_value *endpoint_node =
+ aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("endpoint"));
+ if (endpoint_node == NULL ||
+ s_parse_endpoints_rule_data_endpoint(wrapper->allocator, endpoint_node, &rule.rule_data.endpoint)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract endpoint rule data.");
+ goto on_error;
+ }
+ break;
+ }
+ case AWS_ENDPOINTS_RULE_ERROR: {
+ struct aws_json_value *error_node =
+ aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("error"));
+ if (error_node == NULL ||
+ s_parse_endpoints_rule_data_error(wrapper->allocator, error_node, &rule.rule_data.error)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract error rule data.");
+ goto on_error;
+ }
+ break;
+ }
+ case AWS_ENDPOINTS_RULE_TREE: {
+ if (s_parse_endpoints_rule_data_tree(wrapper->allocator, value, &rule.rule_data.tree)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract tree rule data.");
+ goto on_error;
+ }
+ break;
+ }
+ default:
+ AWS_FATAL_ASSERT(false);
+ }
+
+ /* Optional fields */
+ struct aws_json_value *documentation_node =
+ aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("documentation"));
+ if (documentation_node != NULL) {
+ if (aws_json_value_get_string(documentation_node, &rule.documentation)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract parameter documentation.");
+ goto on_error;
+ }
+ }
+
+ aws_array_list_push_back(wrapper->array, &rule);
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_rule_clean_up(&rule);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+}
+
+static int s_init_ruleset_from_json(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_ruleset *ruleset,
+ struct aws_byte_cursor json) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(ruleset);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&json));
+
+ struct aws_json_value *root = aws_json_value_new_from_string(allocator, json);
+
+ if (root == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse provided string as json.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ ruleset->json_root = root;
+
+ struct aws_json_value *version_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("version"));
+ if (version_node == NULL || aws_json_value_get_string(version_node, &ruleset->version)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract version.");
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET);
+ goto on_error;
+ }
+
+#ifdef ENDPOINTS_VERSION_CHECK /* TODO: samples are currently inconsistent with versions. skip check for now */
+ if (!aws_byte_cursor_eq_c_str(&ruleset->version, &s_supported_version)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unsupported ruleset version.");
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET);
+ goto on_error;
+ }
+#endif
+
+ struct aws_json_value *service_id_node =
+ aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("serviceId"));
+
+ if (service_id_node != NULL && aws_json_value_get_string(service_id_node, &ruleset->service_id)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract serviceId.");
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET);
+ goto on_error;
+ }
+
+ aws_hash_table_init(
+ &ruleset->parameters,
+ allocator,
+ 20,
+ aws_hash_byte_cursor_ptr,
+ aws_endpoints_byte_cursor_eq,
+ NULL,
+ s_callback_endpoints_parameter_destroy);
+
+ struct aws_json_value *parameters_node =
+ aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("parameters"));
+ if (parameters_node == NULL ||
+ s_init_members_from_json(allocator, parameters_node, &ruleset->parameters, s_on_parameter_key)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract parameters.");
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ goto on_error;
+ }
+
+ struct aws_json_value *rules_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("rules"));
+ if (rules_node == NULL || !aws_json_value_is_array(rules_node)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for rules node.");
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ goto on_error;
+ }
+ size_t num_rules = aws_json_get_array_size(rules_node);
+ aws_array_list_init_dynamic(&ruleset->rules, allocator, num_rules, sizeof(struct aws_endpoints_rule));
+ if (s_init_array_from_json(allocator, rules_node, &ruleset->rules, s_on_rule_element)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract rules.");
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ return AWS_OP_ERR;
+}
+
+static void s_endpoints_ruleset_destroy(void *data) {
+ if (data == NULL) {
+ return;
+ }
+
+ struct aws_endpoints_ruleset *ruleset = data;
+
+ aws_json_value_destroy(ruleset->json_root);
+
+ aws_hash_table_clean_up(&ruleset->parameters);
+
+ aws_array_list_deep_clean_up(&ruleset->rules, s_on_rule_array_element_clean_up);
+
+ aws_mem_release(ruleset->allocator, ruleset);
+}
+
+struct aws_endpoints_ruleset *aws_endpoints_ruleset_new_from_string(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor ruleset_json) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&ruleset_json));
+
+ struct aws_endpoints_ruleset *ruleset = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_ruleset));
+ ruleset->allocator = allocator;
+
+ if (s_init_ruleset_from_json(allocator, ruleset, ruleset_json)) {
+ s_endpoints_ruleset_destroy(ruleset);
+ return NULL;
+ }
+
+ aws_ref_count_init(&ruleset->ref_count, ruleset, s_endpoints_ruleset_destroy);
+
+ return ruleset;
+}
+
+struct aws_endpoints_ruleset *aws_endpoints_ruleset_acquire(struct aws_endpoints_ruleset *ruleset) {
+ AWS_PRECONDITION(ruleset);
+ if (ruleset) {
+ aws_ref_count_acquire(&ruleset->ref_count);
+ }
+ return ruleset;
+}
+
+struct aws_endpoints_ruleset *aws_endpoints_ruleset_release(struct aws_endpoints_ruleset *ruleset) {
+ if (ruleset) {
+ aws_ref_count_release(&ruleset->ref_count);
+ }
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
new file mode 100644
index 00000000000..b559579c380
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
@@ -0,0 +1,639 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/json.h>
+#include <aws/common/string.h>
+#include <aws/common/uri.h>
+
+#include <aws/sdkutils/private/endpoints_types_impl.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+#include <aws/sdkutils/resource_name.h>
+
+static struct aws_byte_cursor s_scheme_http = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http");
+static struct aws_byte_cursor s_scheme_https = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https");
+
+static int s_resolve_fn_is_set(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_value = {0};
+ if (aws_array_list_length(argv) != 1 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_ANY, &argv_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for isSet.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ out_value->v.boolean = argv_value.type != AWS_ENDPOINTS_VALUE_NONE;
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value);
+ return result;
+}
+
+static int s_resolve_fn_not(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_value = {0};
+ if (aws_array_list_length(argv) != 1 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for not.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ out_value->v.boolean = !argv_value.v.boolean;
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value);
+ return result;
+}
+
+static int s_resolve_fn_get_attr(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_value = {0};
+ struct aws_endpoints_value argv_path = {0};
+ if (aws_array_list_length(argv) != 2 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_ANY, &argv_value) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_STRING, &argv_path)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for get attr.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ struct aws_byte_cursor path_cur = argv_path.v.owning_cursor_string.cur;
+
+ if (argv_value.type == AWS_ENDPOINTS_VALUE_OBJECT) {
+ if (aws_endpoints_path_through_object(allocator, &argv_value, path_cur, out_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through object.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+ } else if (argv_value.type == AWS_ENDPOINTS_VALUE_ARRAY) {
+ if (aws_endpoints_path_through_array(allocator, scope, &argv_value, path_cur, out_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through array.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Invalid value type for pathing through.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value);
+ aws_endpoints_value_clean_up(&argv_path);
+ return result;
+}
+
+static int s_resolve_fn_substring(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value input_value = {0};
+ struct aws_endpoints_value start_value = {0};
+ struct aws_endpoints_value stop_value = {0};
+ struct aws_endpoints_value reverse_value = {0};
+ if (aws_array_list_length(argv) != 4 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &input_value) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_NUMBER, &start_value) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 2, AWS_ENDPOINTS_VALUE_NUMBER, &stop_value) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 3, AWS_ENDPOINTS_VALUE_BOOLEAN, &reverse_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for substring.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (start_value.v.number >= stop_value.v.number ||
+ input_value.v.owning_cursor_string.cur.len < stop_value.v.number) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ for (size_t idx = 0; idx < input_value.v.owning_cursor_string.cur.len; ++idx) {
+ if (input_value.v.owning_cursor_string.cur.ptr[idx] > 127) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+ }
+
+ if (!reverse_value.v.boolean) {
+ size_t start = (size_t)start_value.v.number;
+ size_t end = (size_t)stop_value.v.number;
+ struct aws_byte_cursor substring = {
+ .ptr = input_value.v.owning_cursor_string.cur.ptr + start,
+ .len = end - start,
+ };
+
+ out_value->type = AWS_ENDPOINTS_VALUE_STRING;
+ out_value->v.owning_cursor_string = aws_endpoints_owning_cursor_from_cursor(allocator, substring);
+ } else {
+ size_t r_start = input_value.v.owning_cursor_string.cur.len - (size_t)stop_value.v.number;
+ size_t r_stop = input_value.v.owning_cursor_string.cur.len - (size_t)start_value.v.number;
+
+ struct aws_byte_cursor substring = {
+ .ptr = input_value.v.owning_cursor_string.cur.ptr + r_start,
+ .len = r_stop - r_start,
+ };
+ out_value->type = AWS_ENDPOINTS_VALUE_STRING;
+ out_value->v.owning_cursor_string = aws_endpoints_owning_cursor_from_cursor(allocator, substring);
+ }
+
+on_done:
+ aws_endpoints_value_clean_up(&input_value);
+ aws_endpoints_value_clean_up(&start_value);
+ aws_endpoints_value_clean_up(&stop_value);
+ aws_endpoints_value_clean_up(&reverse_value);
+ return result;
+}
+
+static int s_resolve_fn_string_equals(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_value_1 = {0};
+ struct aws_endpoints_value argv_value_2 = {0};
+ if (aws_array_list_length(argv) != 2 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value_1) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_STRING, &argv_value_2)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve stringEquals.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ out_value->v.boolean =
+ aws_byte_cursor_eq(&argv_value_1.v.owning_cursor_string.cur, &argv_value_2.v.owning_cursor_string.cur);
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value_1);
+ aws_endpoints_value_clean_up(&argv_value_2);
+ return result;
+}
+
+static int s_resolve_fn_boolean_equals(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_value_1 = {0};
+ struct aws_endpoints_value argv_value_2 = {0};
+ if (aws_array_list_length(argv) != 2 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_value_1) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_value_2)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve booleanEquals.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ out_value->v.boolean = argv_value_1.v.boolean == argv_value_2.v.boolean;
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value_1);
+ aws_endpoints_value_clean_up(&argv_value_2);
+ return result;
+}
+
+static int s_resolve_fn_uri_encode(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_byte_buf buf = {0};
+ struct aws_endpoints_value argv_value = {0};
+ if (aws_array_list_length(argv) != 1 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve parameter to uri encode.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (aws_byte_buf_init(&buf, allocator, 10)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve parameter to uri encode.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (aws_byte_buf_append_encoding_uri_param(&buf, &argv_value.v.owning_cursor_string.cur)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to uri encode value.");
+ aws_byte_buf_clean_up(&buf);
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_STRING;
+ out_value->v.owning_cursor_string =
+ aws_endpoints_owning_cursor_from_string(aws_string_new_from_buf(allocator, &buf));
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value);
+ aws_byte_buf_clean_up(&buf);
+ return result;
+}
+
+static bool s_is_uri_ip(struct aws_byte_cursor host, bool is_uri_encoded) {
+ return aws_is_ipv4(host) || aws_is_ipv6(host, is_uri_encoded);
+}
+
+static int s_resolve_fn_parse_url(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_uri uri;
+ struct aws_json_value *root = NULL;
+ struct aws_endpoints_value argv_url = {0};
+ if (aws_array_list_length(argv) != 1 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_url)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for parse url.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (aws_uri_init_parse(&uri, allocator, &argv_url.v.owning_cursor_string.cur)) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ /* reset error from parser, since non-uri strings should successfully resolve to none. */
+ aws_reset_error();
+ goto on_done;
+ }
+
+ if (aws_uri_query_string(&uri)->len > 0) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ const struct aws_byte_cursor *scheme = aws_uri_scheme(&uri);
+ AWS_ASSERT(scheme != NULL);
+
+ root = aws_json_value_new_object(allocator);
+
+ if (scheme->len == 0) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ if (!(aws_byte_cursor_eq(scheme, &s_scheme_http) || aws_byte_cursor_eq(scheme, &s_scheme_https))) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ if (aws_json_value_add_to_object(
+ root, aws_byte_cursor_from_c_str("scheme"), aws_json_value_new_string(allocator, *scheme))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add scheme to object.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ const struct aws_byte_cursor *authority = aws_uri_authority(&uri);
+ AWS_ASSERT(authority != NULL);
+
+ if (authority->len == 0) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ if (aws_json_value_add_to_object(
+ root, aws_byte_cursor_from_c_str("authority"), aws_json_value_new_string(allocator, *authority))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add authority to object.");
+ goto on_done;
+ }
+
+ const struct aws_byte_cursor *path = aws_uri_path(&uri);
+
+ if (aws_json_value_add_to_object(
+ root, aws_byte_cursor_from_c_str("path"), aws_json_value_new_string(allocator, *path))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add path to object.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ struct aws_byte_cursor normalized_path_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("normalizedPath");
+ struct aws_byte_buf normalized_path_buf;
+ if (aws_byte_buf_init_from_normalized_uri_path(allocator, *path, &normalized_path_buf) ||
+ aws_json_value_add_to_object(
+ root,
+ normalized_path_cur,
+ aws_json_value_new_string(allocator, aws_byte_cursor_from_buf(&normalized_path_buf)))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to normalize path.");
+ aws_byte_buf_clean_up(&normalized_path_buf);
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ aws_byte_buf_clean_up(&normalized_path_buf);
+
+ const struct aws_byte_cursor *host_name = aws_uri_host_name(&uri);
+ bool is_ip = s_is_uri_ip(*host_name, true);
+ if (aws_json_value_add_to_object(
+ root, aws_byte_cursor_from_c_str("isIp"), aws_json_value_new_boolean(allocator, is_ip))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add isIp to object.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ struct aws_byte_buf buf;
+ if (aws_byte_buf_init(&buf, allocator, 0)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed init buffer for parseUrl return.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (aws_byte_buf_append_json_string(root, &buf)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to create JSON object.");
+ aws_byte_buf_clean_up(&buf);
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_OBJECT;
+ out_value->v.owning_cursor_object =
+ aws_endpoints_owning_cursor_from_string(aws_string_new_from_buf(allocator, &buf));
+
+ aws_byte_buf_clean_up(&buf);
+
+on_done:
+ aws_uri_clean_up(&uri);
+ aws_endpoints_value_clean_up(&argv_url);
+ aws_json_value_destroy(root);
+ return result;
+}
+
+static int s_resolve_is_valid_host_label(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ struct aws_endpoints_value argv_value = {0};
+ struct aws_endpoints_value argv_allow_subdomains = {0};
+ if (aws_array_list_length(argv) != 2 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_allow_subdomains)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve not.");
+ goto on_error;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ out_value->v.boolean =
+ aws_is_valid_host_label(argv_value.v.owning_cursor_string.cur, argv_allow_subdomains.v.boolean);
+
+ aws_endpoints_value_clean_up(&argv_value);
+ aws_endpoints_value_clean_up(&argv_allow_subdomains);
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_value_clean_up(&argv_value);
+ aws_endpoints_value_clean_up(&argv_allow_subdomains);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+static int s_resolve_fn_aws_partition(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_region = {0};
+
+ if (aws_array_list_length(argv) != 1 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_region)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve arguments for partitions.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ struct aws_hash_element *element = NULL;
+ struct aws_byte_cursor key = argv_region.v.owning_cursor_string.cur;
+ if (aws_hash_table_find(&scope->partitions->region_to_partition_info, &key, &element)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to find partition info. " PRInSTR, AWS_BYTE_CURSOR_PRI(key));
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (element != NULL) {
+ out_value->type = AWS_ENDPOINTS_VALUE_OBJECT;
+ out_value->v.owning_cursor_object =
+ aws_endpoints_owning_cursor_create(allocator, ((struct aws_partition_info *)element->value)->info);
+ goto on_done;
+ }
+
+ key = aws_map_region_to_partition(key);
+
+ if (key.len == 0) {
+ key = aws_byte_cursor_from_c_str("aws");
+ }
+
+ if (aws_hash_table_find(&scope->partitions->region_to_partition_info, &key, &element) || element == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to find partition info. " PRInSTR, AWS_BYTE_CURSOR_PRI(key));
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_OBJECT;
+ out_value->v.owning_cursor_object =
+ aws_endpoints_owning_cursor_create(allocator, ((struct aws_partition_info *)element->value)->info);
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_region);
+ return result;
+}
+
+static int s_resolve_fn_aws_parse_arn(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_json_value *object = NULL;
+ struct aws_endpoints_value argv_value = {0};
+ if (aws_array_list_length(argv) != 1 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve parseArn.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ struct aws_resource_name arn;
+ if (aws_resource_name_init_from_cur(&arn, &argv_value.v.owning_cursor_string.cur)) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ object = aws_json_value_new_object(allocator);
+ if (object == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init object for parseArn.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (arn.partition.len == 0 || arn.resource_id.len == 0 || arn.service.len == 0) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ /* Split resource id into components, either on : or / */
+ /* TODO: support multiple delims in existing split helper? */
+ struct aws_json_value *resource_id_node = aws_json_value_new_array(allocator);
+ size_t start = 0;
+ for (size_t i = 0; i < arn.resource_id.len; ++i) {
+ if (arn.resource_id.ptr[i] == '/' || arn.resource_id.ptr[i] == ':') {
+ struct aws_byte_cursor cur = {
+ .ptr = arn.resource_id.ptr + start,
+ .len = i - start,
+ };
+
+ struct aws_json_value *element = aws_json_value_new_string(allocator, cur);
+ if (element == NULL || aws_json_value_add_array_element(resource_id_node, element)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add resource id element");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ start = i + 1;
+ }
+ }
+
+ if (start <= arn.resource_id.len) {
+ struct aws_byte_cursor cur = {
+ .ptr = arn.resource_id.ptr + start,
+ .len = arn.resource_id.len - start,
+ };
+ struct aws_json_value *element = aws_json_value_new_string(allocator, cur);
+ if (element == NULL || aws_json_value_add_array_element(resource_id_node, element)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add resource id element");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+ }
+
+ if (aws_json_value_add_to_object(
+ object, aws_byte_cursor_from_c_str("partition"), aws_json_value_new_string(allocator, arn.partition)) ||
+ aws_json_value_add_to_object(
+ object, aws_byte_cursor_from_c_str("service"), aws_json_value_new_string(allocator, arn.service)) ||
+ aws_json_value_add_to_object(
+ object, aws_byte_cursor_from_c_str("region"), aws_json_value_new_string(allocator, arn.region)) ||
+ aws_json_value_add_to_object(
+ object, aws_byte_cursor_from_c_str("accountId"), aws_json_value_new_string(allocator, arn.account_id)) ||
+ aws_json_value_add_to_object(object, aws_byte_cursor_from_c_str("resourceId"), resource_id_node)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add elements to object for parseArn.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_OBJECT;
+ out_value->v.owning_cursor_object =
+ aws_endpoints_owning_cursor_from_string(aws_string_new_from_json(allocator, object));
+
+ if (out_value->v.owning_cursor_object.cur.len == 0) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to create string from json.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+on_done:
+ aws_json_value_destroy(object);
+ aws_endpoints_value_clean_up(&argv_value);
+ return result;
+}
+
+static int s_resolve_is_virtual_hostable_s3_bucket(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_value = {0};
+ struct aws_endpoints_value argv_allow_subdomains = {0};
+ if (aws_array_list_length(argv) != 2 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_allow_subdomains)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for isVirtualHostableS3Bucket.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ struct aws_byte_cursor label_cur = argv_value.v.owning_cursor_string.cur;
+
+ bool has_uppercase_chars = false;
+ for (size_t i = 0; i < label_cur.len; ++i) {
+ if (label_cur.ptr[i] >= 'A' && label_cur.ptr[i] <= 'Z') {
+ has_uppercase_chars = true;
+ break;
+ }
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ out_value->v.boolean = (label_cur.len >= 3 && label_cur.len <= 63) && !has_uppercase_chars &&
+ aws_is_valid_host_label(label_cur, argv_allow_subdomains.v.boolean) &&
+ !aws_is_ipv4(label_cur);
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value);
+ aws_endpoints_value_clean_up(&argv_allow_subdomains);
+ return result;
+}
+
+typedef int(standard_lib_function_fn)(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value);
+
+static standard_lib_function_fn *s_resolve_fn_vt[AWS_ENDPOINTS_FN_LAST] = {
+ [AWS_ENDPOINTS_FN_IS_SET] = s_resolve_fn_is_set,
+ [AWS_ENDPOINTS_FN_NOT] = s_resolve_fn_not,
+ [AWS_ENDPOINTS_FN_GET_ATTR] = s_resolve_fn_get_attr,
+ [AWS_ENDPOINTS_FN_SUBSTRING] = s_resolve_fn_substring,
+ [AWS_ENDPOINTS_FN_STRING_EQUALS] = s_resolve_fn_string_equals,
+ [AWS_ENDPOINTS_FN_BOOLEAN_EQUALS] = s_resolve_fn_boolean_equals,
+ [AWS_ENDPOINTS_FN_URI_ENCODE] = s_resolve_fn_uri_encode,
+ [AWS_ENDPOINTS_FN_PARSE_URL] = s_resolve_fn_parse_url,
+ [AWS_ENDPOINTS_FN_IS_VALID_HOST_LABEL] = s_resolve_is_valid_host_label,
+ [AWS_ENDPOINTS_FN_AWS_PARTITION] = s_resolve_fn_aws_partition,
+ [AWS_ENDPOINTS_FN_AWS_PARSE_ARN] = s_resolve_fn_aws_parse_arn,
+ [AWS_ENDPOINTS_FN_AWS_IS_VIRTUAL_HOSTABLE_S3_BUCKET] = s_resolve_is_virtual_hostable_s3_bucket,
+};
+
+int aws_endpoints_dispatch_standard_lib_fn_resolve(
+ enum aws_endpoints_fn_type type,
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+ return s_resolve_fn_vt[type](allocator, argv, scope, out_value);
+}
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
new file mode 100644
index 00000000000..36e0c60bec7
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
@@ -0,0 +1,235 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/array_list.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/json.h>
+#include <aws/common/string.h>
+#include <aws/sdkutils/private/endpoints_types_impl.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+
+uint64_t aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_LAST];
+
+void aws_endpoints_rule_engine_init(void) {
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_IS_SET] = aws_hash_c_string("isSet");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_NOT] = aws_hash_c_string("not");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_GET_ATTR] = aws_hash_c_string("getAttr");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_SUBSTRING] = aws_hash_c_string("substring");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_STRING_EQUALS] = aws_hash_c_string("stringEquals");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_BOOLEAN_EQUALS] = aws_hash_c_string("booleanEquals");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_URI_ENCODE] = aws_hash_c_string("uriEncode");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_PARSE_URL] = aws_hash_c_string("parseURL");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_IS_VALID_HOST_LABEL] = aws_hash_c_string("isValidHostLabel");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_AWS_PARTITION] = aws_hash_c_string("aws.partition");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_AWS_PARSE_ARN] = aws_hash_c_string("aws.parseArn");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_AWS_IS_VIRTUAL_HOSTABLE_S3_BUCKET] =
+ aws_hash_c_string("aws.isVirtualHostableS3Bucket");
+}
+
+static void s_on_condition_array_element_clean_up(void *element) {
+ struct aws_endpoints_condition *condition = element;
+ aws_endpoints_condition_clean_up(condition);
+}
+
+static void s_on_rule_array_element_clean_up(void *element) {
+ struct aws_endpoints_rule *rule = element;
+ aws_endpoints_rule_clean_up(rule);
+}
+
+static void s_on_expr_array_element_clean_up(void *element) {
+ struct aws_endpoints_expr *expr = element;
+ aws_endpoints_expr_clean_up(expr);
+}
+
+struct aws_partition_info *aws_partition_info_new(struct aws_allocator *allocator, struct aws_byte_cursor name) {
+ AWS_PRECONDITION(allocator);
+ struct aws_partition_info *partition_info = aws_mem_calloc(allocator, 1, sizeof(struct aws_partition_info));
+
+ partition_info->allocator = allocator;
+ partition_info->name = name;
+
+ return partition_info;
+}
+
+void aws_partition_info_destroy(struct aws_partition_info *partition_info) {
+ if (partition_info == NULL) {
+ return;
+ }
+
+ if (!partition_info->is_copy) {
+ aws_string_destroy(partition_info->info);
+ }
+
+ aws_mem_release(partition_info->allocator, partition_info);
+}
+
+struct aws_endpoints_parameter *aws_endpoints_parameter_new(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor name) {
+ AWS_PRECONDITION(allocator);
+ struct aws_endpoints_parameter *parameter = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_parameter));
+
+ parameter->allocator = allocator;
+ parameter->name = name;
+
+ return parameter;
+}
+
+void aws_endpoints_parameter_destroy(struct aws_endpoints_parameter *parameter) {
+ if (parameter == NULL) {
+ return;
+ }
+
+ aws_mem_release(parameter->allocator, parameter);
+}
+
+void aws_endpoints_rule_clean_up(struct aws_endpoints_rule *rule) {
+ AWS_PRECONDITION(rule);
+
+ aws_array_list_deep_clean_up(&rule->conditions, s_on_condition_array_element_clean_up);
+
+ switch (rule->type) {
+ case AWS_ENDPOINTS_RULE_ENDPOINT:
+ aws_endpoints_rule_data_endpoint_clean_up(&rule->rule_data.endpoint);
+ break;
+ case AWS_ENDPOINTS_RULE_ERROR:
+ aws_endpoints_rule_data_error_clean_up(&rule->rule_data.error);
+ break;
+ case AWS_ENDPOINTS_RULE_TREE:
+ aws_endpoints_rule_data_tree_clean_up(&rule->rule_data.tree);
+ break;
+ default:
+ AWS_FATAL_ASSERT(false);
+ }
+
+ AWS_ZERO_STRUCT(*rule);
+}
+
+void aws_endpoints_rule_data_endpoint_clean_up(struct aws_endpoints_rule_data_endpoint *rule_data) {
+ AWS_PRECONDITION(rule_data);
+
+ aws_endpoints_expr_clean_up(&rule_data->url);
+
+ aws_byte_buf_clean_up(&rule_data->properties);
+ aws_hash_table_clean_up(&rule_data->headers);
+
+ AWS_ZERO_STRUCT(*rule_data);
+}
+
+void aws_endpoints_rule_data_error_clean_up(struct aws_endpoints_rule_data_error *rule_data) {
+ AWS_PRECONDITION(rule_data);
+
+ aws_endpoints_expr_clean_up(&rule_data->error);
+
+ AWS_ZERO_STRUCT(*rule_data);
+}
+
+void aws_endpoints_rule_data_tree_clean_up(struct aws_endpoints_rule_data_tree *rule_data) {
+ AWS_PRECONDITION(rule_data);
+
+ aws_array_list_deep_clean_up(&rule_data->rules, s_on_rule_array_element_clean_up);
+ AWS_ZERO_STRUCT(*rule_data);
+}
+
+void aws_endpoints_condition_clean_up(struct aws_endpoints_condition *condition) {
+ AWS_PRECONDITION(condition);
+
+ aws_endpoints_expr_clean_up(&condition->expr);
+ AWS_ZERO_STRUCT(*condition);
+}
+
+void aws_endpoints_function_clean_up(struct aws_endpoints_function *function) {
+ AWS_PRECONDITION(function);
+
+ aws_array_list_deep_clean_up(&function->argv, s_on_expr_array_element_clean_up);
+ AWS_ZERO_STRUCT(*function);
+}
+
+void aws_endpoints_expr_clean_up(struct aws_endpoints_expr *expr) {
+ AWS_PRECONDITION(expr);
+
+ switch (expr->type) {
+ case AWS_ENDPOINTS_EXPR_STRING:
+ case AWS_ENDPOINTS_EXPR_BOOLEAN:
+ case AWS_ENDPOINTS_EXPR_NUMBER:
+ case AWS_ENDPOINTS_EXPR_REFERENCE:
+ break;
+ case AWS_ENDPOINTS_EXPR_FUNCTION:
+ aws_endpoints_function_clean_up(&expr->e.function);
+ break;
+ case AWS_ENDPOINTS_EXPR_ARRAY:
+ aws_array_list_deep_clean_up(&expr->e.array, s_on_expr_array_element_clean_up);
+ break;
+ default:
+ AWS_FATAL_ASSERT(false);
+ }
+
+ AWS_ZERO_STRUCT(*expr);
+}
+
+struct aws_endpoints_scope_value *aws_endpoints_scope_value_new(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor name_cur) {
+ AWS_PRECONDITION(allocator);
+ struct aws_endpoints_scope_value *value = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_scope_value));
+
+ value->allocator = allocator;
+ value->name = aws_endpoints_owning_cursor_from_cursor(allocator, name_cur);
+
+ return value;
+}
+
+void aws_endpoints_scope_value_destroy(struct aws_endpoints_scope_value *scope_value) {
+ if (scope_value == NULL) {
+ return;
+ }
+ aws_string_destroy(scope_value->name.string);
+ aws_endpoints_value_clean_up(&scope_value->value);
+ aws_mem_release(scope_value->allocator, scope_value);
+}
+
+void aws_endpoints_value_clean_up_cb(void *value);
+
+void aws_endpoints_value_clean_up(struct aws_endpoints_value *aws_endpoints_value) {
+ AWS_PRECONDITION(aws_endpoints_value);
+
+ if (aws_endpoints_value->type == AWS_ENDPOINTS_VALUE_STRING) {
+ aws_string_destroy(aws_endpoints_value->v.owning_cursor_string.string);
+ }
+
+ if (aws_endpoints_value->type == AWS_ENDPOINTS_VALUE_OBJECT) {
+ aws_string_destroy(aws_endpoints_value->v.owning_cursor_object.string);
+ }
+
+ if (aws_endpoints_value->type == AWS_ENDPOINTS_VALUE_ARRAY) {
+ aws_array_list_deep_clean_up(&aws_endpoints_value->v.array, aws_endpoints_value_clean_up_cb);
+ }
+
+ AWS_ZERO_STRUCT(*aws_endpoints_value);
+}
+
+void aws_endpoints_value_clean_up_cb(void *value) {
+ struct aws_endpoints_value *aws_endpoints_value = value;
+ aws_endpoints_value_clean_up(aws_endpoints_value);
+}
+
+int aws_endpoints_deep_copy_parameter_value(
+ struct aws_allocator *allocator,
+ const struct aws_endpoints_value *from,
+ struct aws_endpoints_value *to) {
+
+ to->type = from->type;
+
+ if (to->type == AWS_ENDPOINTS_VALUE_STRING) {
+ to->v.owning_cursor_string = aws_endpoints_owning_cursor_create(allocator, from->v.owning_cursor_string.string);
+ } else if (to->type == AWS_ENDPOINTS_VALUE_BOOLEAN) {
+ to->v.boolean = from->v.boolean;
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Unexpected value type.");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
new file mode 100644
index 00000000000..1fdf246adb4
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
@@ -0,0 +1,588 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/json.h>
+#include <aws/common/logging.h>
+#include <aws/common/string.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+#include <aws/sdkutils/sdkutils.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER /* Disable sscanf warnings on windows. */
+# pragma warning(disable : 4204)
+# pragma warning(disable : 4706)
+# pragma warning(disable : 4996)
+#endif
+
+/* 4 octets of 3 chars max + 3 separators + null terminator */
+#define AWS_IPV4_STR_LEN 16
+#define IP_CHAR_FMT "%03" SCNu16
+
+/* arbitrary max length of a region. curent longest region name is 16 chars */
+#define AWS_REGION_LEN 50
+
+bool aws_is_ipv4(struct aws_byte_cursor host) {
+ if (host.len > AWS_IPV4_STR_LEN - 1) {
+ return false;
+ }
+
+ char copy[AWS_IPV4_STR_LEN] = {0};
+ memcpy(copy, host.ptr, host.len);
+
+ uint16_t octet[4] = {0};
+ char remainder[2] = {0};
+ if (4 != sscanf(
+ copy,
+ IP_CHAR_FMT "." IP_CHAR_FMT "." IP_CHAR_FMT "." IP_CHAR_FMT "%1s",
+ &octet[0],
+ &octet[1],
+ &octet[2],
+ &octet[3],
+ remainder)) {
+ return false;
+ }
+
+ for (size_t i = 0; i < 4; ++i) {
+ if (octet[i] > 255) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool s_starts_with(struct aws_byte_cursor cur, uint8_t ch) {
+ return cur.len > 0 && cur.ptr[0] == ch;
+}
+
+static bool s_ends_with(struct aws_byte_cursor cur, uint8_t ch) {
+ return cur.len > 0 && cur.ptr[cur.len - 1] == ch;
+}
+
+static bool s_is_ipv6_char(uint8_t value) {
+ return aws_isxdigit(value) || value == ':';
+}
+
+/* actual encoding is %25, but % is omitted for simplicity, since split removes it */
+static struct aws_byte_cursor s_percent_uri_enc = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("25");
+/*
+ * IPv6 format:
+ * 8 groups of 4 hex chars separated by colons (:)
+ * leading 0s in each group can be skipped
+ * 2 or more consecutive zero groups can be replaced by double colon (::),
+ * but only once.
+ * ipv6 literal can be scoped by to zone by appending % followed by zone name
+ * ( does not look like there is length reqs on zone name length. this
+ * implementation enforces that its > 1 )
+ * ipv6 can be embedded in url, in which case it must be wrapped inside []
+ * and % be uri encoded as %25.
+ * Implementation is fairly trivial and just iterates through the string
+ * keeping track of the spec above.
+ */
+bool aws_is_ipv6(struct aws_byte_cursor host, bool is_uri_encoded) {
+ if (host.len == 0) {
+ return false;
+ }
+
+ if (is_uri_encoded) {
+ if (!s_starts_with(host, '[') || !s_ends_with(host, ']')) {
+ return false;
+ }
+ aws_byte_cursor_advance(&host, 1);
+ --host.len;
+ }
+
+ struct aws_byte_cursor substr = {0};
+ /* first split is required ipv6 part */
+ bool is_split = aws_byte_cursor_next_split(&host, '%', &substr);
+ AWS_ASSERT(is_split); /* function is guaranteed to return at least one split */
+
+ if (!is_split || substr.len == 0 || (s_starts_with(substr, ':') || s_ends_with(substr, ':')) ||
+ !aws_byte_cursor_satisfies_pred(&substr, s_is_ipv6_char)) {
+ return false;
+ }
+
+ uint8_t group_count = 0;
+ bool has_double_colon = false;
+ struct aws_byte_cursor group = {0};
+ while (aws_byte_cursor_next_split(&substr, ':', &group)) {
+ ++group_count;
+
+ if (group_count > 8 || /* too many groups */
+ group.len > 4 || /* too many chars in group */
+ (has_double_colon && group.len == 0)) { /* only one double colon allowed */
+ return false;
+ }
+
+ has_double_colon = has_double_colon || group.len == 0;
+ }
+
+ /* second split is optional zone part */
+ if (aws_byte_cursor_next_split(&host, '%', &substr)) {
+ if ((is_uri_encoded &&
+ (substr.len < 3 ||
+ !aws_byte_cursor_starts_with(&substr, &s_percent_uri_enc))) || /* encoding for % + 1 extra char */
+ (!is_uri_encoded && substr.len == 0) || /* at least 1 char */
+ !aws_byte_cursor_satisfies_pred(&substr, aws_isalnum)) {
+ return false;
+ }
+ }
+
+ return has_double_colon ? group_count < 7 : group_count == 8;
+}
+
+static char s_known_countries[][3] = {{"us"}, {"eu"}, {"ap"}, {"sa"}, {"ca"}, {"me"}, {"af"}};
+
+struct aws_byte_cursor aws_map_region_to_partition(struct aws_byte_cursor region) {
+ if (region.len > AWS_REGION_LEN - 1) {
+ return aws_byte_cursor_from_c_str("");
+ }
+
+ char copy[AWS_REGION_LEN] = {0};
+ memcpy(copy, region.ptr, region.len);
+
+ char country[3] = {0};
+ char location[31] = {0};
+ uint8_t num = 0;
+
+ if (3 == sscanf(copy, "%2[^-]-%30[^-]-%03" SCNu8, country, location, &num)) {
+ if (location[0] != 0 && num > 0) {
+ for (size_t i = 0; i < sizeof(s_known_countries) / sizeof(s_known_countries[0]); ++i) {
+ if (0 == strncmp(s_known_countries[i], country, 3)) {
+ return aws_byte_cursor_from_c_str("aws");
+ }
+ }
+
+ if (0 == strncmp("cn", country, 3)) {
+ return aws_byte_cursor_from_c_str("aws-cn");
+ }
+ }
+ }
+
+ if (2 == sscanf(copy, "us-gov-%30[^-]-%03" SCNu8, location, &num)) {
+ if (location[0] != 0 && num > 0) {
+ return aws_byte_cursor_from_c_str("aws-us-gov");
+ }
+ }
+
+ if (2 == sscanf(copy, "us-iso-%30[^-]-%03" SCNu8, location, &num)) {
+ if (location[0] != 0 && num > 0) {
+ return aws_byte_cursor_from_c_str("aws-iso");
+ }
+ }
+
+ if (2 == sscanf(copy, "us-isob-%30[^-]-%03" SCNu8, location, &num)) {
+ if (location[0] != 0 && num > 0) {
+ return aws_byte_cursor_from_c_str("aws-iso-b");
+ }
+ }
+
+ return aws_byte_cursor_from_c_str("");
+}
+
+bool aws_is_valid_host_label(struct aws_byte_cursor label, bool allow_subdomains) {
+ bool next_must_be_alnum = true;
+ size_t subdomain_count = 0;
+
+ for (size_t i = 0; i < label.len; ++i) {
+ if (label.ptr[i] == '.') {
+ if (!allow_subdomains || subdomain_count == 0) {
+ return false;
+ }
+
+ if (!aws_isalnum(label.ptr[i - 1])) {
+ return false;
+ }
+
+ next_must_be_alnum = true;
+ subdomain_count = 0;
+ continue;
+ }
+
+ if (next_must_be_alnum && !aws_isalnum(label.ptr[i])) {
+ return false;
+ } else if (label.ptr[i] != '-' && !aws_isalnum(label.ptr[i])) {
+ return false;
+ }
+
+ next_must_be_alnum = false;
+ ++subdomain_count;
+
+ if (subdomain_count > 63) {
+ return false;
+ }
+ }
+
+ return aws_isalnum(label.ptr[label.len - 1]);
+}
+
+struct aws_byte_cursor s_path_slash = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/");
+
+int aws_byte_buf_init_from_normalized_uri_path(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor path,
+ struct aws_byte_buf *out_normalized_path) {
+ /* Normalized path is just regular path that ensures that path starts and ends with slash */
+
+ if (aws_byte_buf_init(out_normalized_path, allocator, path.len + 2)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed init buffer for parseUrl return.");
+ goto on_error;
+ }
+
+ if (path.len == 0) {
+ if (aws_byte_buf_append(out_normalized_path, &s_path_slash)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add path to object.");
+ goto on_error;
+ }
+ return AWS_OP_SUCCESS;
+ }
+
+ if (path.ptr[0] != '/') {
+ if (aws_byte_buf_append_dynamic(out_normalized_path, &s_path_slash)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to append slash to normalized path.");
+ goto on_error;
+ }
+ }
+
+ if (aws_byte_buf_append_dynamic(out_normalized_path, &path)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to append path to normalized path.");
+ goto on_error;
+ }
+
+ if (out_normalized_path->buffer[out_normalized_path->len - 1] != '/') {
+ if (aws_byte_buf_append_dynamic(out_normalized_path, &s_path_slash)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to append slash to normalized path.");
+ goto on_error;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_byte_buf_clean_up(out_normalized_path);
+ return AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED;
+}
+
+struct aws_string *aws_string_new_from_json(struct aws_allocator *allocator, const struct aws_json_value *value) {
+ struct aws_byte_buf json_blob;
+ if (aws_byte_buf_init(&json_blob, allocator, 0)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init buffer for json conversion.");
+ goto on_error;
+ }
+
+ if (aws_byte_buf_append_json_string(value, &json_blob)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to convert json to string.");
+ goto on_error;
+ }
+
+ struct aws_string *ret = aws_string_new_from_buf(allocator, &json_blob);
+ aws_byte_buf_clean_up(&json_blob);
+ return ret;
+
+on_error:
+ aws_byte_buf_clean_up(&json_blob);
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ return NULL;
+}
+
+bool aws_endpoints_byte_cursor_eq(const void *a, const void *b) {
+ const struct aws_byte_cursor *a_cur = a;
+ const struct aws_byte_cursor *b_cur = b;
+ return aws_byte_cursor_eq(a_cur, b_cur);
+}
+
+void aws_array_list_deep_clean_up(struct aws_array_list *array, aws_array_callback_clean_up_fn on_clean_up_element) {
+ for (size_t idx = 0; idx < aws_array_list_length(array); ++idx) {
+ void *element = NULL;
+
+ aws_array_list_get_at_ptr(array, &element, idx);
+ AWS_ASSERT(element);
+ on_clean_up_element(element);
+ }
+
+ aws_array_list_clean_up(array);
+}
+
+/* TODO: this can be moved into common */
+static bool s_split_on_first_delim(
+ struct aws_byte_cursor input,
+ char split_on,
+ struct aws_byte_cursor *out_split,
+ struct aws_byte_cursor *out_rest) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&input));
+
+ uint8_t *delim = memchr(input.ptr, split_on, input.len);
+ if (delim != NULL) {
+ out_split->ptr = input.ptr;
+ out_split->len = delim - input.ptr;
+
+ out_rest->ptr = delim;
+ out_rest->len = input.len - (delim - input.ptr);
+ return true;
+ }
+
+ *out_split = input;
+ out_rest->ptr = NULL;
+ out_rest->len = 0;
+ return false;
+}
+
+static int s_buf_append_and_update_quote_count(
+ struct aws_byte_buf *buf,
+ struct aws_byte_cursor to_append,
+ size_t *quote_count,
+ bool is_json) {
+
+ /* Dont count quotes if its not json. escaped quotes will be replaced with
+ regular quotes when ruleset json is parsed, which will lead to incorrect
+ results for when templates should be resolved in regular strings.
+ Note: in json blobs escaped quotes are preserved and bellow approach works. */
+ if (is_json) {
+ for (size_t idx = 0; idx < to_append.len; ++idx) {
+ if (to_append.ptr[idx] == '"' && !(idx > 0 && to_append.ptr[idx - 1] == '\\')) {
+ ++*quote_count;
+ }
+ }
+ }
+ return aws_byte_buf_append_dynamic(buf, &to_append);
+}
+
+static struct aws_byte_cursor escaped_closing_curly = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("}}");
+static struct aws_byte_cursor escaped_opening_curly = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("{{");
+
+/*
+ * Small helper to deal with escapes correctly in strings that occur before
+ * template opening curly. General flow for resolving is to look for opening and
+ * then closing curly. This function correctly appends any escaped closing
+ * curlies and errors out if closing is not escaped (i.e. its unmatched).
+ */
+int s_append_template_prefix_to_buffer(
+ struct aws_byte_buf *out_buf,
+ struct aws_byte_cursor prefix,
+ size_t *quote_count,
+ bool is_json) {
+
+ struct aws_byte_cursor split = {0};
+ struct aws_byte_cursor rest = {0};
+
+ while (s_split_on_first_delim(prefix, '}', &split, &rest)) {
+ if (s_buf_append_and_update_quote_count(out_buf, split, quote_count, is_json)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+
+ if (*quote_count % 2 == 0) {
+ if (aws_byte_buf_append_byte_dynamic(out_buf, '}')) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+ aws_byte_cursor_advance(&rest, 1);
+ prefix = rest;
+ continue;
+ }
+
+ if (aws_byte_cursor_starts_with(&rest, &escaped_closing_curly)) {
+ if (aws_byte_buf_append_byte_dynamic(out_buf, '}')) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+ aws_byte_cursor_advance(&rest, 2);
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Unmatched or unescaped closing curly.");
+ goto on_error;
+ }
+
+ prefix = rest;
+ }
+
+ if (s_buf_append_and_update_quote_count(out_buf, split, quote_count, is_json)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+int aws_byte_buf_init_from_resolved_templated_string(
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *out_buf,
+ struct aws_byte_cursor string,
+ aws_endpoints_template_resolve_fn resolve_callback,
+ void *user_data,
+ bool is_json) {
+ AWS_PRECONDITION(allocator);
+
+ struct aws_owning_cursor resolved_template;
+ AWS_ZERO_STRUCT(resolved_template);
+
+ if (aws_byte_buf_init(out_buf, allocator, string.len)) {
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ }
+
+ size_t quote_count = is_json ? 0 : 1;
+ struct aws_byte_cursor split = {0};
+ struct aws_byte_cursor rest = {0};
+ while (s_split_on_first_delim(string, '{', &split, &rest)) {
+ if (s_append_template_prefix_to_buffer(out_buf, split, &quote_count, is_json)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to buffer while evaluating templated sting.");
+ goto on_error;
+ }
+
+ if (quote_count % 2 == 0) {
+ if (aws_byte_buf_append_byte_dynamic(out_buf, '{')) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+ aws_byte_cursor_advance(&rest, 1);
+ string = rest;
+ continue;
+ }
+
+ if (aws_byte_cursor_starts_with(&rest, &escaped_opening_curly)) {
+ if (aws_byte_buf_append_byte_dynamic(out_buf, '{')) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+ aws_byte_cursor_advance(&rest, 2);
+ string = rest;
+ continue;
+ }
+
+ aws_byte_cursor_advance(&rest, 1);
+
+ struct aws_byte_cursor after_closing = {0};
+ if (!s_split_on_first_delim(rest, '}', &split, &after_closing)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Unmatched closing curly.");
+ goto on_error;
+ }
+ aws_byte_cursor_advance(&after_closing, 1);
+ string = after_closing;
+
+ if (resolve_callback(split, user_data, &resolved_template)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to resolve template.");
+ goto on_error;
+ }
+
+ if (s_buf_append_and_update_quote_count(out_buf, resolved_template.cur, &quote_count, is_json)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append resolved value.");
+ goto on_error;
+ }
+
+ aws_owning_cursor_clean_up(&resolved_template);
+ }
+
+ if (s_buf_append_and_update_quote_count(out_buf, split, &quote_count, is_json)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_byte_buf_clean_up(out_buf);
+ aws_owning_cursor_clean_up(&resolved_template);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+int aws_path_through_json(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *root,
+ struct aws_byte_cursor path,
+ const struct aws_json_value **out_value) {
+
+ struct aws_array_list path_segments;
+ if (aws_array_list_init_dynamic(&path_segments, allocator, 10, sizeof(struct aws_byte_cursor)) ||
+ aws_byte_cursor_split_on_char(&path, '.', &path_segments)) {
+ goto on_error;
+ }
+
+ *out_value = root;
+ for (size_t idx = 0; idx < aws_array_list_length(&path_segments); ++idx) {
+ struct aws_byte_cursor path_el_cur;
+ if (aws_array_list_get_at(&path_segments, &path_el_cur, idx)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to get path element");
+ goto on_error;
+ }
+
+ struct aws_byte_cursor element_cur = {0};
+ aws_byte_cursor_next_split(&path_el_cur, '[', &element_cur);
+
+ struct aws_byte_cursor index_cur = {0};
+ bool has_index = aws_byte_cursor_next_split(&path_el_cur, '[', &index_cur) &&
+ aws_byte_cursor_next_split(&path_el_cur, ']', &index_cur);
+
+ if (element_cur.len > 0) {
+ *out_value = aws_json_value_get_from_object(*out_value, element_cur);
+ if (NULL == *out_value) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Invalid path. " PRInSTR ".", AWS_BYTE_CURSOR_PRI(element_cur));
+ goto on_error;
+ }
+ }
+
+ if (has_index) {
+ uint64_t index;
+ if (aws_byte_cursor_utf8_parse_u64(index_cur, &index)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE,
+ "Failed to parse index: " PRInSTR,
+ AWS_BYTE_CURSOR_PRI(index_cur));
+ goto on_error;
+ }
+ *out_value = aws_json_get_array_element(*out_value, (size_t)index);
+ if (NULL == *out_value) {
+ aws_reset_error();
+ goto on_success;
+ }
+ }
+ }
+
+on_success:
+ aws_array_list_clean_up(&path_segments);
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_array_list_clean_up(&path_segments);
+ *out_value = NULL;
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+struct aws_owning_cursor aws_endpoints_owning_cursor_create(
+ struct aws_allocator *allocator,
+ const struct aws_string *str) {
+ struct aws_string *clone = aws_string_clone_or_reuse(allocator, str);
+ struct aws_owning_cursor ret = {.string = clone, .cur = aws_byte_cursor_from_string(clone)};
+ return ret;
+}
+
+struct aws_owning_cursor aws_endpoints_owning_cursor_from_string(struct aws_string *str) {
+ struct aws_owning_cursor ret = {.string = str, .cur = aws_byte_cursor_from_string(str)};
+ return ret;
+}
+
+struct aws_owning_cursor aws_endpoints_owning_cursor_from_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor cur) {
+ struct aws_string *clone = aws_string_new_from_cursor(allocator, &cur);
+ struct aws_owning_cursor ret = {.string = clone, .cur = aws_byte_cursor_from_string(clone)};
+ return ret;
+}
+
+struct aws_owning_cursor aws_endpoints_non_owning_cursor_create(struct aws_byte_cursor cur) {
+ struct aws_owning_cursor ret = {.string = NULL, .cur = cur};
+ return ret;
+}
+
+void aws_owning_cursor_clean_up(struct aws_owning_cursor *cursor) {
+ aws_string_destroy(cursor->string);
+ cursor->string = NULL;
+ cursor->cur.ptr = NULL;
+ cursor->cur.len = 0;
+}
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c b/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
new file mode 100644
index 00000000000..0ff758606f7
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
@@ -0,0 +1,283 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/array_list.h>
+#include <aws/common/byte_buf.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/json.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/string.h>
+#include <aws/sdkutils/partitions.h>
+#include <aws/sdkutils/private/endpoints_types_impl.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+
+static struct aws_byte_cursor s_supported_version = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("1.0");
+
+struct aws_byte_cursor aws_partitions_get_supported_version(void) {
+ return s_supported_version;
+}
+
+static void s_partitions_config_destroy(void *data) {
+ if (data == NULL) {
+ return;
+ }
+
+ struct aws_partitions_config *partitions = data;
+
+ aws_json_value_destroy(partitions->json_root);
+ aws_string_destroy(partitions->version);
+ aws_hash_table_clean_up(&partitions->region_to_partition_info);
+ aws_mem_release(partitions->allocator, partitions);
+}
+
+struct region_merge_wrapper {
+ struct aws_json_value *outputs_node;
+ struct aws_json_value *merge_node;
+};
+
+static int s_on_region_merge(
+ const struct aws_byte_cursor *key,
+ const struct aws_json_value *value,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)out_should_continue;
+
+ struct region_merge_wrapper *merge = user_data;
+
+ /*
+ * Note: latest partitions file includes description on every region.
+ * This results in a separate record created for every region, since any
+ * overrides on region create a new record that is a merge of partition
+ * default and override.
+ * Description is not used by endpoints rule engine, hence lets ignore it
+ * during merge for now to avoid creating numerous records that all have the
+ * same data.
+ * This decision can be revisited later if we decide to extend partitions
+ * parsing for any other use cases.
+ */
+ if (aws_byte_cursor_eq_c_str(key, "description")) {
+ return AWS_OP_SUCCESS;
+ }
+
+ if (merge->merge_node == NULL) {
+ merge->merge_node = aws_json_value_duplicate(merge->outputs_node);
+ }
+
+ /*
+ * Note: Its valid for region to add new field to default partition outputs
+ * instead of overriding existing one. So only delete previous value if it exists.
+ */
+ if (aws_json_value_has_key(merge->merge_node, *key) && aws_json_value_remove_from_object(merge->merge_node, *key)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to remove previous partition value.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+ }
+
+ if (aws_json_value_add_to_object(merge->merge_node, *key, aws_json_value_duplicate(value))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to overwrite partition data.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+struct partition_parse_wrapper {
+ struct aws_partitions_config *partitions;
+ struct aws_json_value *outputs_node;
+ struct aws_string *outputs_str;
+};
+
+static int s_on_region_element(
+ const struct aws_byte_cursor *key,
+ const struct aws_json_value *value,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)out_should_continue;
+
+ struct aws_partition_info *partition_info = NULL;
+ struct partition_parse_wrapper *wrapper = user_data;
+
+ struct region_merge_wrapper merge = {
+ .outputs_node = wrapper->outputs_node,
+ .merge_node = NULL,
+ };
+
+ if (aws_json_const_iterate_object(value, s_on_region_merge, &merge)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to parse partitions.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+ }
+
+ if (merge.merge_node != NULL) {
+ partition_info = aws_partition_info_new(wrapper->partitions->allocator, *key);
+ partition_info->info = aws_string_new_from_json(wrapper->partitions->allocator, merge.merge_node);
+ aws_json_value_destroy(merge.merge_node);
+ } else {
+ partition_info = aws_partition_info_new(wrapper->partitions->allocator, *key);
+ partition_info->info = wrapper->outputs_str;
+ partition_info->is_copy = true;
+ }
+
+ if (aws_hash_table_put(
+ &wrapper->partitions->region_to_partition_info, &partition_info->name, partition_info, NULL)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to add partition info.");
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ if (partition_info != NULL) {
+ aws_partition_info_destroy(partition_info);
+ }
+ return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+}
+
+static int s_on_partition_element( /* "partitions" array callback: parse one partition object into the region map */
+    size_t idx,
+    const struct aws_json_value *partition_node,
+    bool *out_should_continue, /* never cleared: iteration is stopped via the error return instead */
+    void *user_data) { /* struct aws_partitions_config* being populated */
+    (void)out_should_continue;
+    (void)idx;
+
+    struct aws_partitions_config *partitions = user_data;
+
+    struct aws_byte_cursor id_cur;
+    struct aws_json_value *id_node = aws_json_value_get_from_object(partition_node, aws_byte_cursor_from_c_str("id"));
+    if (id_node == NULL || aws_json_value_get_string(id_node, &id_cur)) { /* "id" is required and must be a string */
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to extract id of partition.");
+        goto on_error;
+    }
+
+    struct aws_json_value *outputs_node =
+        aws_json_value_get_from_object(partition_node, aws_byte_cursor_from_c_str("outputs"));
+    if (outputs_node == NULL) { /* "outputs" is required: partition-wide defaults shared by its regions */
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to extract outputs of partition.");
+        goto on_error;
+    }
+
+    struct aws_partition_info *partition_info = aws_partition_info_new(partitions->allocator, id_cur);
+    partition_info->info = aws_string_new_from_json(partitions->allocator, outputs_node); /* serialized outputs, owned by partition_info */
+
+    if (partition_info->info == NULL) { /* NOTE(review): partition_info itself appears to leak on this path -- on_error never destroys it; confirm upstream */
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to add partition info.");
+        goto on_error;
+    }
+
+    if (aws_hash_table_put(&partitions->region_to_partition_info, &partition_info->name, partition_info, NULL)) { /* key points into the value; table owns partition_info on success. NOTE(review): leaked on failure */
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to add partition info.");
+        goto on_error;
+    }
+
+    struct partition_parse_wrapper wrapper = {
+        .outputs_node = outputs_node, .outputs_str = partition_info->info, .partitions = partitions};
+
+    struct aws_json_value *regions_node =
+        aws_json_value_get_from_object(partition_node, aws_byte_cursor_from_c_str("regions"));
+    if (regions_node != NULL && aws_json_const_iterate_object(regions_node, s_on_region_element, &wrapper)) { /* "regions" is optional */
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to parse regions.");
+        goto on_error;
+    }
+
+    return AWS_OP_SUCCESS;
+
+on_error:
+    return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+}
+
+static int s_init_partitions_config_from_json( /* parse the partitions JSON document and fill the region->partition map */
+    struct aws_allocator *allocator,
+    struct aws_partitions_config *partitions, /* out: json_root and region_to_partition_info are populated */
+    struct aws_byte_cursor partitions_cur) { /* raw JSON text */
+
+    struct aws_json_value *root = aws_json_value_new_from_string(allocator, partitions_cur);
+
+    if (root == NULL) { /* NOTE(review): logs under the ENDPOINTS subject while the rest of this file uses PARTITIONS_PARSING -- looks like a copy/paste slip; confirm */
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse provided string as json.");
+        return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+    }
+
+    partitions->json_root = root; /* ownership transferred to the config; presumably released by its destroy -- confirm */
+
+    struct aws_byte_cursor version_cur;
+    struct aws_json_value *version_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("version"));
+    if (version_node == NULL || aws_json_value_get_string(version_node, &version_cur)) {
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to extract version.");
+        return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_UNSUPPORTED);
+    }
+
+#ifdef ENDPOINTS_VERSION_CHECK /* TODO: samples are currently inconsistent with versions. skip check for now */
+    if (!aws_byte_cursor_eq_c_str(&version_cur, &s_supported_version)) { /* NOTE(review): compiled out; if enabled, no `on_error` label exists in this function and `&s_supported_version` looks wrong for a c_str argument -- confirm before defining ENDPOINTS_VERSION_CHECK */
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Unsupported partitions version.");
+        aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_UNSUPPORTED);
+        goto on_error;
+    }
+#endif
+
+    struct aws_json_value *partitions_node =
+        aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("partitions"));
+    if (partitions_node == NULL || aws_json_const_iterate_array(partitions_node, s_on_partition_element, partitions)) { /* one callback invocation per partition entry */
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to parse partitions.");
+        return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+static void s_callback_partition_info_destroy(void *data) { /* hash-table value destructor: adapts aws_partition_info_destroy to the void* callback signature */
+    struct aws_partition_info *info = data;
+    aws_partition_info_destroy(info);
+}
+
+struct aws_partitions_config *aws_partitions_config_new_from_string( /* public ctor: parse partitions JSON; returns NULL and raises on failure */
+    struct aws_allocator *allocator,
+    struct aws_byte_cursor json) {
+
+    AWS_PRECONDITION(allocator);
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&json));
+
+    struct aws_partitions_config *partitions = aws_mem_calloc(allocator, 1, sizeof(struct aws_partitions_config)); /* no NULL check: aws_mem_calloc presumably aborts on OOM, per aws-c-common convention -- confirm */
+    partitions->allocator = allocator;
+
+    if (aws_hash_table_init(
+            &partitions->region_to_partition_info,
+            allocator,
+            20, /* initial capacity hint */
+            aws_hash_byte_cursor_ptr,
+            aws_endpoints_byte_cursor_eq,
+            NULL, /* keys point into the values (partition_info->name), so only values get a destructor */
+            s_callback_partition_info_destroy)) {
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to init partition info map.");
+        aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+        goto on_error;
+    }
+
+    if (s_init_partitions_config_from_json(allocator, partitions, json)) { /* error already raised by the parser */
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to init partition info from json.");
+        goto on_error;
+    }
+
+    aws_ref_count_init(&partitions->ref_count, partitions, s_partitions_config_destroy); /* NOTE(review): ref count only initialized on success; the error path below calls destroy directly, which must not touch ref_count -- confirm */
+
+    return partitions;
+
+on_error:
+    s_partitions_config_destroy(partitions);
+    return NULL;
+}
+
+struct aws_partitions_config *aws_partitions_config_acquire(struct aws_partitions_config *partitions) { /* ref-count increment; returns its argument for call chaining */
+    AWS_PRECONDITION(partitions);
+    if (partitions) { /* the precondition asserts non-NULL in debug builds, yet NULL is still tolerated at runtime */
+        aws_ref_count_acquire(&partitions->ref_count);
+    }
+    return partitions;
+}
+
+struct aws_partitions_config *aws_partitions_config_release(struct aws_partitions_config *partitions) { /* ref-count decrement; NULL-safe. Always returns NULL so callers can clear their pointer */
+    if (partitions) {
+        aws_ref_count_release(&partitions->ref_count); /* destroys the config when the count reaches zero */
+    }
+    return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c b/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
new file mode 100644
index 00000000000..0687c5ea7e5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
@@ -0,0 +1,108 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/sdkutils/resource_name.h>
+
+#define ARN_SPLIT_COUNT ((size_t)5) /* split on at most 5 ':' so the trailing resource-id keeps any embedded colons */
+#define ARN_PARTS_COUNT ((size_t)6) /* arn : partition : service : region : account-id : resource-id */
+
+static const char ARN_DELIMETER[] = ":"; /* sic: upstream spelling of "delimiter", kept for API stability */
+static const char ARN_DELIMETER_CHAR = ':';
+
+static const size_t DELIMETER_LEN = 8; /* strlen("arn:::::") */
+
+int aws_resource_name_init_from_cur(struct aws_resource_name *arn, const struct aws_byte_cursor *input) { /* parse "arn:partition:service:region:account:resource" into cursors that alias `input` (no copies) */
+    struct aws_byte_cursor arn_parts[ARN_PARTS_COUNT];
+    struct aws_array_list arn_part_list;
+    aws_array_list_init_static(&arn_part_list, arn_parts, ARN_PARTS_COUNT, sizeof(struct aws_byte_cursor)); /* stack-backed list; no allocation */
+    if (aws_byte_cursor_split_on_char_n(input, ARN_DELIMETER_CHAR, ARN_SPLIT_COUNT, &arn_part_list)) {
+        return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+    }
+
+    struct aws_byte_cursor *arn_prefix;
+    if (aws_array_list_get_at_ptr(&arn_part_list, (void **)&arn_prefix, 0) ||
+        !aws_byte_cursor_eq_c_str(arn_prefix, "arn")) { /* first segment must be the literal "arn" */
+        return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+    }
+    if (aws_array_list_get_at(&arn_part_list, &arn->partition, 1)) { /* each get_at fails when the input had too few ':' segments */
+        return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+    }
+    if (aws_array_list_get_at(&arn_part_list, &arn->service, 2)) {
+        return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+    }
+    if (aws_array_list_get_at(&arn_part_list, &arn->region, 3)) {
+        return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+    }
+    if (aws_array_list_get_at(&arn_part_list, &arn->account_id, 4)) {
+        return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+    }
+    if (aws_array_list_get_at(&arn_part_list, &arn->resource_id, 5)) { /* remainder of the string, embedded colons included */
+        return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+    }
+    return AWS_OP_SUCCESS;
+}
+
+int aws_resource_name_length(const struct aws_resource_name *arn, size_t *size) { /* formatted length of the ARN, excluding any NUL terminator */
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition));
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service));
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region));
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id));
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id));
+
+    *size = arn->partition.len + arn->region.len + arn->service.len + arn->account_id.len + arn->resource_id.len +
+            DELIMETER_LEN; /* "arn:" prefix plus the four inner ':' separators */
+
+    return AWS_OP_SUCCESS; /* always succeeds; int return kept for API symmetry */
+}
+
+int aws_byte_buf_append_resource_name(struct aws_byte_buf *buf, const struct aws_resource_name *arn) { /* append "arn:partition:service:region:account:resource" to buf; buf must be pre-sized (aws_byte_buf_append does not grow) */
+    AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition));
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service));
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region));
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id));
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id));
+
+    const struct aws_byte_cursor prefix = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:");
+    const struct aws_byte_cursor colon_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(ARN_DELIMETER);
+
+    if (aws_byte_buf_append(buf, &prefix)) {
+        return aws_raise_error(aws_last_error()); /* re-raise idiom: propagate whatever error append raised */
+    }
+    if (aws_byte_buf_append(buf, &arn->partition)) {
+        return aws_raise_error(aws_last_error());
+    }
+    if (aws_byte_buf_append(buf, &colon_cur)) {
+        return aws_raise_error(aws_last_error());
+    }
+
+    if (aws_byte_buf_append(buf, &arn->service)) {
+        return aws_raise_error(aws_last_error());
+    }
+    if (aws_byte_buf_append(buf, &colon_cur)) {
+        return aws_raise_error(aws_last_error());
+    }
+
+    if (aws_byte_buf_append(buf, &arn->region)) {
+        return aws_raise_error(aws_last_error());
+    }
+    if (aws_byte_buf_append(buf, &colon_cur)) {
+        return aws_raise_error(aws_last_error());
+    }
+
+    if (aws_byte_buf_append(buf, &arn->account_id)) {
+        return aws_raise_error(aws_last_error());
+    }
+    if (aws_byte_buf_append(buf, &colon_cur)) {
+        return aws_raise_error(aws_last_error());
+    }
+
+    if (aws_byte_buf_append(buf, &arn->resource_id)) { /* NOTE(review): a failure mid-way leaves buf partially written -- callers presumably discard it; confirm */
+        return aws_raise_error(aws_last_error());
+    }
+
+    AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
+    return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c b/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
new file mode 100644
index 00000000000..2fb102f43ee
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
@@ -0,0 +1,67 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/sdkutils/private/endpoints_types_impl.h>
+#include <aws/sdkutils/sdkutils.h>
+
+/* clang-format off */
+static struct aws_error_info s_errors[] = { /* one entry per AWS_ERROR_SDKUTILS_* code this library registers */
+    AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_GENERAL, "General error in SDK Utility library", "aws-c-sdkutils"),
+    AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_PARSE_FATAL, "Parser encountered a fatal error", "aws-c-sdkutils"),
+    AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE, "Parser encountered an error, but recovered", "aws-c-sdkutils"),
+    AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET, "Ruleset version not supported", "aws-c-sdkutils"),
+    AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED, "Ruleset parsing failed", "aws-c-sdkutils"),
+    AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED, "Endpoints eval failed to initialize", "aws-c-sdkutils"),
+    AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED, "Unexpected eval error", "aws-c-sdkutils"),
+    AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_EMPTY_RULESET, "Ruleset has no rules", "aws-c-sdkutils"),
+    AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_RULESET_EXHAUSTED, "Ruleset was exhausted before finding a matching rule", "aws-c-sdkutils"),
+    AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_PARTITIONS_UNSUPPORTED, "Partitions version not supported.", "aws-c-sdkutils"),
+    AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED, "Partitions parsing failed.", "aws-c-sdkutils"),
+};
+/* clang-format on */
+
+static struct aws_error_info_list s_sdkutils_error_info = {
+    .error_list = s_errors,
+    .count = sizeof(s_errors) / sizeof(struct aws_error_info), /* same as AWS_ARRAY_SIZE used for the log subjects below */
+};
+
+static struct aws_log_subject_info s_log_subject_infos[] = { /* log subjects registered by this library; names appear in emitted log lines */
+    DEFINE_LOG_SUBJECT_INFO(
+        AWS_LS_SDKUTILS_GENERAL,
+        "SDKUtils",
+        "Subject for SDK utility logging that defies categorization."),
+    DEFINE_LOG_SUBJECT_INFO(AWS_LS_SDKUTILS_PROFILE, "AWSProfile", "Subject for AWS Profile parser and utilities"),
+};
+
+static struct aws_log_subject_info_list s_sdkutils_log_subjects = {
+    .subject_list = s_log_subject_infos,
+    .count = AWS_ARRAY_SIZE(s_log_subject_infos),
+};
+
+static int s_library_init_count = 0; /* plain counter, no synchronization visible -- init/cleanup are presumably serialized by the caller; TODO confirm */
+
+void aws_sdkutils_library_init(struct aws_allocator *allocator) { /* reference-counted init: only the first call does real work */
+    if (s_library_init_count++ != 0) {
+        return;
+    }
+
+    aws_common_library_init(allocator); /* dependency must be up before registering errors/subjects into it */
+
+    aws_register_error_info(&s_sdkutils_error_info);
+    aws_register_log_subject_info_list(&s_sdkutils_log_subjects);
+
+    aws_endpoints_rule_engine_init();
+}
+
+void aws_sdkutils_library_clean_up(void) { /* pairs with aws_sdkutils_library_init; only the last outstanding call tears down. NOTE(review): no matching rule-engine cleanup here despite aws_endpoints_rule_engine_init in init -- confirm whether one exists */
+    if (--s_library_init_count != 0) {
+        return;
+    }
+
+    aws_unregister_log_subject_info_list(&s_sdkutils_log_subjects); /* reverse of registration order */
+    aws_unregister_error_info(&s_sdkutils_error_info);
+
+    aws_common_library_clean_up();
+}
diff --git a/contrib/restricted/aws/aws-c-sdkutils/ya.make b/contrib/restricted/aws/aws-c-sdkutils/ya.make
new file mode 100644
index 00000000000..023ade1edb0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/ya.make
@@ -0,0 +1,43 @@
+# Generated by devtools/yamaker from nixpkgs 23.05.
+
+LIBRARY()
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+VERSION(0.1.9)
+
+ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-sdkutils/archive/v0.1.9.tar.gz)
+
+PEERDIR(
+ contrib/restricted/aws/aws-c-common
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/aws/aws-c-sdkutils/include
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_RUNTIME()
+
+CFLAGS(
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+)
+
+SRCS(
+ source/aws_profile.c
+ source/endpoints_rule_engine.c
+ source/endpoints_ruleset.c
+ source/endpoints_standard_lib.c
+ source/endpoints_types_impl.c
+ source/endpoints_util.c
+ source/partitions.c
+ source/resource_name.c
+ source/sdkutils.c
+)
+
+END()