aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/restricted/aws/aws-c-common
diff options
context:
space:
mode:
authorthegeorg <thegeorg@yandex-team.ru>2022-05-11 12:12:06 +0300
committerthegeorg <thegeorg@yandex-team.ru>2022-05-11 12:12:06 +0300
commit62f93da087b2fec0f89979fd11ac4d754ca36253 (patch)
tree67bf8ceb55e2d079f3575f9a7373584ad407d2a5 /contrib/restricted/aws/aws-c-common
parent8d55620139d4309265409767f873ba83fe046418 (diff)
downloadydb-62f93da087b2fec0f89979fd11ac4d754ca36253.tar.gz
Update aws-c-common and aws-c-io
* Update `contrib/restricted/aws/aws-c-io` to 0.11.0 * Backport cJSON symbol renaming logic from aws-sdk-cpp upstream ref:396829235a01ed34888651ee38ebd76c95510d6b
Diffstat (limited to 'contrib/restricted/aws/aws-c-common')
-rw-r--r--contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report27
-rw-r--r--contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report33
-rw-r--r--contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt24
-rw-r--r--contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt11
-rw-r--r--contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt11
-rw-r--r--contrib/restricted/aws/aws-c-common/README.md4
-rw-r--r--contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h3
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h42
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h8
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl26
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/assert.h36
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/bus.h97
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h63
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/byte_order.inl10
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/clock.h6
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/clock.inl80
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h49
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h1
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/error.h6
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h302
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/file.h198
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h5
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/json.h348
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/logging.h16
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/macros.h10
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h1
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/platform.h9
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h2
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/private/dlloads.h6
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl4
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/private/thread_shared.h39
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/promise.h95
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/ref_count.h40
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/resource_name.h43
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.h6
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.inl6
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h1
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/stdint.h13
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/string.h119
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h26
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/task_scheduler.h7
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/thread.h83
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/thread_scheduler.h60
-rw-r--r--contrib/restricted/aws/aws-c-common/source/allocator.c112
-rw-r--r--contrib/restricted/aws/aws-c-common/source/allocator_sba.c55
-rw-r--r--contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c32
-rw-r--r--contrib/restricted/aws/aws-c-common/source/bus.c724
-rw-r--r--contrib/restricted/aws/aws-c-common/source/byte_buf.c104
-rw-r--r--contrib/restricted/aws/aws-c-common/source/command_line_parser.c57
-rw-r--r--contrib/restricted/aws/aws-c-common/source/common.c77
-rw-r--r--contrib/restricted/aws/aws-c-common/source/date_time.c34
-rw-r--r--contrib/restricted/aws/aws-c-common/source/error.c2
-rw-r--r--contrib/restricted/aws/aws-c-common/source/external/cJSON.c3113
-rw-r--r--contrib/restricted/aws/aws-c-common/source/file.c171
-rw-r--r--contrib/restricted/aws/aws-c-common/source/hash_table.c10
-rw-r--r--contrib/restricted/aws/aws-c-common/source/json.c344
-rw-r--r--contrib/restricted/aws/aws-c-common/source/log_writer.c7
-rw-r--r--contrib/restricted/aws/aws-c-common/source/logging.c74
-rw-r--r--contrib/restricted/aws/aws-c-common/source/memtrace.c2
-rw-r--r--contrib/restricted/aws/aws-c-common/source/posix/file.c279
-rw-r--r--contrib/restricted/aws/aws-c-common/source/posix/system_info.c145
-rw-r--r--contrib/restricted/aws/aws-c-common/source/posix/thread.c153
-rw-r--r--contrib/restricted/aws/aws-c-common/source/priority_queue.c2
-rw-r--r--contrib/restricted/aws/aws-c-common/source/process_common.c25
-rw-r--r--contrib/restricted/aws/aws-c-common/source/promise.c115
-rw-r--r--contrib/restricted/aws/aws-c-common/source/ref_count.c53
-rw-r--r--contrib/restricted/aws/aws-c-common/source/resource_name.c111
-rw-r--r--contrib/restricted/aws/aws-c-common/source/ring_buffer.c74
-rw-r--r--contrib/restricted/aws/aws-c-common/source/string.c179
-rw-r--r--contrib/restricted/aws/aws-c-common/source/task_scheduler.c5
-rw-r--r--contrib/restricted/aws/aws-c-common/source/thread_scheduler.c225
-rw-r--r--contrib/restricted/aws/aws-c-common/source/thread_shared.c167
72 files changed, 7871 insertions, 556 deletions
diff --git a/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report b/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report
index db3048dc1e..89570540b1 100644
--- a/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report
+++ b/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report
@@ -49,6 +49,7 @@ BELONGS ya.make
include/aws/common/atomics_gnu.inl [5:5]
include/aws/common/atomics_gnu_old.inl [4:4]
include/aws/common/atomics_msvc.inl [5:5]
+ include/aws/common/bus.h [5:5]
include/aws/common/byte_buf.h [4:4]
include/aws/common/byte_order.h [5:5]
include/aws/common/byte_order.inl [5:5]
@@ -68,7 +69,9 @@ BELONGS ya.make
include/aws/common/error.inl [5:5]
include/aws/common/exports.h [4:4]
include/aws/common/fifo_cache.h [4:4]
+ include/aws/common/file.h [4:4]
include/aws/common/hash_table.h [5:5]
+ include/aws/common/json.h [5:5]
include/aws/common/lifo_cache.h [4:4]
include/aws/common/linked_hash_table.h [4:4]
include/aws/common/linked_list.h [5:5]
@@ -98,10 +101,11 @@ BELONGS ya.make
include/aws/common/private/byte_buf.h [4:4]
include/aws/common/private/dlloads.h [4:4]
include/aws/common/private/hash_table_impl.h [5:5]
+ include/aws/common/private/thread_shared.h [4:4]
include/aws/common/private/xml_parser_impl.h [5:5]
include/aws/common/process.h [4:4]
+ include/aws/common/promise.h [2:2]
include/aws/common/ref_count.h [5:5]
- include/aws/common/resource_name.h [2:2]
include/aws/common/ring_buffer.h [4:4]
include/aws/common/ring_buffer.inl [4:4]
include/aws/common/rw_lock.h [5:5]
@@ -113,6 +117,7 @@ BELONGS ya.make
include/aws/common/system_info.h [5:5]
include/aws/common/task_scheduler.h [5:5]
include/aws/common/thread.h [5:5]
+ include/aws/common/thread_scheduler.h [4:4]
include/aws/common/time.h [4:4]
include/aws/common/uuid.h [5:5]
include/aws/common/xml_parser.h [5:5]
@@ -126,6 +131,7 @@ BELONGS ya.make
source/arch/intel/encoding_avx2.c [2:2]
source/array_list.c [2:2]
source/assert.c [2:2]
+ source/bus.c [2:2]
source/byte_buf.c [2:2]
source/cache.c [2:2]
source/codegen.c [2:2]
@@ -137,7 +143,9 @@ BELONGS ya.make
source/encoding.c [2:2]
source/error.c [2:2]
source/fifo_cache.c [2:2]
+ source/file.c [2:2]
source/hash_table.c [2:2]
+ source/json.c [2:2]
source/lifo_cache.c [2:2]
source/linked_hash_table.c [2:2]
source/log_channel.c [2:2]
@@ -151,6 +159,7 @@ BELONGS ya.make
source/posix/condition_variable.c [2:2]
source/posix/device_random.c [2:2]
source/posix/environment.c [2:2]
+ source/posix/file.c [2:2]
source/posix/mutex.c [2:2]
source/posix/process.c [2:2]
source/posix/rw_lock.c [2:2]
@@ -159,12 +168,14 @@ BELONGS ya.make
source/posix/time.c [2:2]
source/priority_queue.c [2:2]
source/process_common.c [2:2]
+ source/promise.c [2:2]
source/ref_count.c [2:2]
- source/resource_name.c [2:2]
source/ring_buffer.c [2:2]
source/statistics.c [2:2]
source/string.c [2:2]
source/task_scheduler.c [2:2]
+ source/thread_scheduler.c [2:2]
+ source/thread_shared.c [2:2]
source/uuid.c [2:2]
source/xml_parser.c [2:2]
@@ -178,3 +189,15 @@ BELONGS ya.make
Match type : COPYRIGHT
Files with this license:
source/posix/time.c [15:15]
+
+KEEP COPYRIGHT_SERVICE_LABEL bdcf211d81a69c0f282fb7543c1a24a7
+BELONGS ya.make
+ License text:
+ Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ include/aws/common/external/cJSON.h [2:2]
+ source/external/cJSON.c [2:2]
diff --git a/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report b/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report
index ecd816ac72..5c9d7d309e 100644
--- a/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report
+++ b/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report
@@ -54,7 +54,7 @@ BELONGS ya.make
Match type : TEXT
Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain-disclaimer.LICENSE
Files with this license:
- include/aws/common/private/lookup3.inl [21:22]
+ include/aws/common/private/lookup3.inl [24:25]
KEEP Public-Domain 5b7627115f23e7c5f0d8e352a16d9353
BELONGS ya.make
@@ -66,7 +66,19 @@ BELONGS ya.make
Match type : NOTICE
Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
Files with this license:
- include/aws/common/private/lookup3.inl [6:6]
+ include/aws/common/private/lookup3.inl [9:9]
+
+KEEP MIT 5debb370f50e1dfd24ff5144233a2ef6
+BELONGS ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 100.00
+ Match type : TEXT
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ include/aws/common/external/cJSON.h [4:20]
+ source/external/cJSON.c [4:20]
KEEP Apache-2.0 6c901454b872854c0dea3ec06b67701a
BELONGS ya.make
@@ -91,7 +103,7 @@ BELONGS ya.make
Match type : REFERENCE
Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
Files with this license:
- include/aws/common/private/lookup3.inl [16:16]
+ include/aws/common/private/lookup3.inl [19:19]
KEEP Apache-2.0 7b04071babb9b8532292659e4abba7e3
BELONGS ya.make
@@ -115,6 +127,7 @@ BELONGS ya.make
Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
Files with this license:
source/arch/arm/asm/cpuid.c [4:13]
+ source/bus.c [4:13]
KEEP Apache-2.0 d591512e466bb957030b8857f753349e
BELONGS ya.make
@@ -137,6 +150,7 @@ BELONGS ya.make
include/aws/common/atomics_gnu.inl [6:6]
include/aws/common/atomics_gnu_old.inl [5:5]
include/aws/common/atomics_msvc.inl [6:6]
+ include/aws/common/bus.h [6:6]
include/aws/common/byte_buf.h [5:5]
include/aws/common/byte_order.h [6:6]
include/aws/common/byte_order.inl [6:6]
@@ -156,7 +170,9 @@ BELONGS ya.make
include/aws/common/error.inl [6:6]
include/aws/common/exports.h [5:5]
include/aws/common/fifo_cache.h [5:5]
+ include/aws/common/file.h [5:5]
include/aws/common/hash_table.h [6:6]
+ include/aws/common/json.h [6:6]
include/aws/common/lifo_cache.h [5:5]
include/aws/common/linked_hash_table.h [5:5]
include/aws/common/linked_list.h [6:6]
@@ -186,10 +202,11 @@ BELONGS ya.make
include/aws/common/private/byte_buf.h [5:5]
include/aws/common/private/dlloads.h [5:5]
include/aws/common/private/hash_table_impl.h [6:6]
+ include/aws/common/private/thread_shared.h [5:5]
include/aws/common/private/xml_parser_impl.h [6:6]
include/aws/common/process.h [5:5]
+ include/aws/common/promise.h [3:3]
include/aws/common/ref_count.h [6:6]
- include/aws/common/resource_name.h [3:3]
include/aws/common/ring_buffer.h [5:5]
include/aws/common/ring_buffer.inl [5:5]
include/aws/common/rw_lock.h [6:6]
@@ -201,6 +218,7 @@ BELONGS ya.make
include/aws/common/system_info.h [6:6]
include/aws/common/task_scheduler.h [6:6]
include/aws/common/thread.h [6:6]
+ include/aws/common/thread_scheduler.h [5:5]
include/aws/common/time.h [5:5]
include/aws/common/uuid.h [6:6]
include/aws/common/xml_parser.h [6:6]
@@ -224,7 +242,9 @@ BELONGS ya.make
source/encoding.c [3:3]
source/error.c [3:3]
source/fifo_cache.c [3:3]
+ source/file.c [3:3]
source/hash_table.c [3:3]
+ source/json.c [3:3]
source/lifo_cache.c [3:3]
source/linked_hash_table.c [3:3]
source/log_channel.c [3:3]
@@ -238,6 +258,7 @@ BELONGS ya.make
source/posix/condition_variable.c [3:3]
source/posix/device_random.c [3:3]
source/posix/environment.c [3:3]
+ source/posix/file.c [3:3]
source/posix/mutex.c [3:3]
source/posix/process.c [3:3]
source/posix/rw_lock.c [3:3]
@@ -246,12 +267,14 @@ BELONGS ya.make
source/posix/time.c [3:3]
source/priority_queue.c [3:3]
source/process_common.c [3:3]
+ source/promise.c [3:3]
source/ref_count.c [3:3]
- source/resource_name.c [3:3]
source/ring_buffer.c [3:3]
source/statistics.c [3:3]
source/string.c [3:3]
source/task_scheduler.c [3:3]
+ source/thread_scheduler.c [3:3]
+ source/thread_shared.c [3:3]
source/uuid.c [3:3]
source/xml_parser.c [3:3]
diff --git a/contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt b/contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt
index c74e7c0ea5..a950dafebe 100644
--- a/contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt
+++ b/contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt
@@ -264,11 +264,35 @@ This library is licensed under the Apache 2.0 License.
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+====================COPYRIGHT====================
+Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+
====================File: NOTICE====================
AWS C Common
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+====================MIT====================
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
====================Public-Domain====================
* The following public domain code has been modified as follows:
diff --git a/contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt b/contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt
index 3728d92779..39146acbd8 100644
--- a/contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt
+++ b/contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt
@@ -10,10 +10,12 @@
add_library(restricted-aws-aws-c-common)
target_compile_options(restricted-aws-aws-c-common PRIVATE
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DCJSON_HIDE_SYMBOLS
-DHAVE_AVX2_INTRINSICS
-DHAVE_MM256_EXTRACT_EPI64
-DHAVE_SYSCONF
-DUSE_SIMD_ENCODING
+ -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE
)
target_include_directories(restricted-aws-aws-c-common PUBLIC
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/generated/include
@@ -33,6 +35,7 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/array_list.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/assert.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/bus.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/byte_buf.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/cache.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/codegen.c
@@ -43,8 +46,11 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/device_random.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/encoding.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/error.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/external/cJSON.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/fifo_cache.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/file.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/hash_table.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/json.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/lifo_cache.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/linked_hash_table.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/log_channel.c
@@ -58,6 +64,7 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/condition_variable.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/device_random.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/environment.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/file.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/mutex.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/process.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/rw_lock.c
@@ -66,12 +73,14 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/time.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/priority_queue.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/process_common.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/promise.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/ref_count.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/resource_name.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/ring_buffer.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/statistics.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/string.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/task_scheduler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/thread_shared.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/uuid.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/xml_parser.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/arch/intel/asm/cpuid.c
diff --git a/contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt b/contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt
index cbb6afb997..6de6ac5160 100644
--- a/contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt
+++ b/contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt
@@ -10,10 +10,12 @@
add_library(restricted-aws-aws-c-common)
target_compile_options(restricted-aws-aws-c-common PRIVATE
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DCJSON_HIDE_SYMBOLS
-DHAVE_AVX2_INTRINSICS
-DHAVE_MM256_EXTRACT_EPI64
-DHAVE_SYSCONF
-DUSE_SIMD_ENCODING
+ -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD_ATTR
)
target_include_directories(restricted-aws-aws-c-common PUBLIC
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/generated/include
@@ -29,6 +31,7 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/array_list.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/assert.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/bus.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/byte_buf.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/cache.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/codegen.c
@@ -39,8 +42,11 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/device_random.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/encoding.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/error.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/external/cJSON.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/fifo_cache.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/file.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/hash_table.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/json.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/lifo_cache.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/linked_hash_table.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/log_channel.c
@@ -54,6 +60,7 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/condition_variable.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/device_random.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/environment.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/file.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/mutex.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/process.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/rw_lock.c
@@ -62,12 +69,14 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/time.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/priority_queue.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/process_common.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/promise.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/ref_count.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/resource_name.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/ring_buffer.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/statistics.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/string.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/task_scheduler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/thread_shared.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/uuid.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/xml_parser.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/arch/intel/asm/cpuid.c
diff --git a/contrib/restricted/aws/aws-c-common/README.md b/contrib/restricted/aws/aws-c-common/README.md
index 054c918735..95f4191c6d 100644
--- a/contrib/restricted/aws/aws-c-common/README.md
+++ b/contrib/restricted/aws/aws-c-common/README.md
@@ -121,8 +121,8 @@ have pre-slotted log subjects & error codes for each library. The currently allo
| [0x2C00, 0x3000) | aws-crt-nodejs |
| [0x3000, 0x3400) | aws-crt-dotnet |
| [0x3400, 0x3800) | aws-c-iot |
-| [0x3800, 0x3C00) | (reserved for future project) |
-| [0x3C00, 0x4000) | (reserved for future project) |
+| [0x3800, 0x3C00) | aws-c-s3 |
+| [0x3C00, 0x4000) | aws-c-sdkutils |
| [0x4000, 0x4400) | (reserved for future project) |
| [0x4400, 0x4800) | (reserved for future project) |
diff --git a/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h b/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h
index decbdf88f0..f152531c17 100644
--- a/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h
+++ b/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h
@@ -15,6 +15,9 @@
#define AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS
#define AWS_HAVE_GCC_INLINE_ASM
/* #undef AWS_HAVE_MSVC_MULX */
+#define AWS_HAVE_POSIX_LARGE_FILE_SUPPORT
/* #undef AWS_HAVE_EXECINFO */
+/* #undef AWS_HAVE_WINAPI_DESKTOP */
+#define AWS_HAVE_LINUX_IF_LINK_H
#endif
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h b/contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h
index ba4d9d5c17..9d7f2bb500 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h
@@ -52,14 +52,20 @@ void aws_wrapped_cf_allocator_destroy(CFAllocatorRef allocator);
#endif
/**
- * Returns at least `size` of memory ready for usage or returns NULL on failure.
+ * Returns at least `size` of memory ready for usage. In versions v0.6.8 and prior, this function was allowed to return
+ * NULL. In later versions, if allocator->mem_acquire() returns NULL, this function will assert and exit. To handle
+ * conditions where OOM is not a fatal error, allocator->mem_acquire() is responsible for finding/reclaiming/running a
+ * GC etc...before returning.
*/
AWS_COMMON_API
void *aws_mem_acquire(struct aws_allocator *allocator, size_t size);
/**
* Allocates a block of memory for an array of num elements, each of them size bytes long, and initializes all its bits
- * to zero. Returns null on failure.
+ * to zero. In versions v0.6.8 and prior, this function was allowed to return NULL.
+ * In later versions, if allocator->mem_calloc() returns NULL, this function will assert and exit. To handle
+ * conditions where OOM is not a fatal error, allocator->mem_calloc() is responsible for finding/reclaiming/running a
+ * GC etc...before returning.
*/
AWS_COMMON_API
void *aws_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size);
@@ -72,6 +78,11 @@ void *aws_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size);
* in the same contiguous block of memory.
*
* Returns a pointer to the allocation.
+ *
+ * In versions v0.6.8 and prior, this function was allowed to return
+ * NULL. In later versions, if allocator->mem_acquire() returns NULL, this function will assert and exit. To handle
+ * conditions where OOM is not a fatal error, allocator->mem_acquire() is responsible for finding/reclaiming/running a
+ * GC etc...before returning.
*/
AWS_COMMON_API
void *aws_mem_acquire_many(struct aws_allocator *allocator, size_t count, ...);
@@ -83,13 +94,15 @@ void *aws_mem_acquire_many(struct aws_allocator *allocator, size_t count, ...);
AWS_COMMON_API
void aws_mem_release(struct aws_allocator *allocator, void *ptr);
-/*
+/**
* Attempts to adjust the size of the pointed-to memory buffer from oldsize to
* newsize. The pointer (*ptr) may be changed if the memory needs to be
* reallocated.
*
- * If reallocation fails, *ptr is unchanged, and this method raises an
- * AWS_ERROR_OOM error.
+ * In versions v0.6.8 and prior, this function was allowed to return
+ * NULL. In later versions, if allocator->mem_realloc() returns NULL, this function will assert and exit. To handle
+ * conditions where OOM is not a fatal error, allocator->mem_realloc() is responsible for finding/reclaiming/running a
+ * GC etc...before returning.
*/
AWS_COMMON_API
int aws_mem_realloc(struct aws_allocator *allocator, void **ptr, size_t oldsize, size_t newsize);
@@ -166,6 +179,25 @@ struct aws_allocator *aws_small_block_allocator_new(struct aws_allocator *alloca
AWS_COMMON_API
void aws_small_block_allocator_destroy(struct aws_allocator *sba_allocator);
+/*
+ * Returns the number of bytes currently active in the SBA
+ */
+AWS_COMMON_API
+size_t aws_small_block_allocator_bytes_active(struct aws_allocator *sba_allocator);
+
+/*
+ * Returns the number of bytes reserved in pages/bins inside the SBA, e.g. the
+ * current system memory used by the SBA
+ */
+AWS_COMMON_API
+size_t aws_small_block_allocator_bytes_reserved(struct aws_allocator *sba_allocator);
+
+/*
+ * Returns the page size that the SBA is using
+ */
+AWS_COMMON_API
+size_t aws_small_block_allocator_page_size(struct aws_allocator *sba_allocator);
+
AWS_EXTERN_C_END
#endif /* AWS_COMMON_ALLOCATOR_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h
index 1eb7f773cf..bbd50ead32 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h
@@ -88,6 +88,14 @@ AWS_STATIC_IMPL
int aws_array_list_front(const struct aws_array_list *AWS_RESTRICT list, void *val);
/**
+ * Pushes the memory pointed to by val onto the front of internal list.
+ * This call results in shifting all of the elements in the list. Avoid this call unless that
+ * is intended behavior.
+ */
+AWS_STATIC_IMPL
+int aws_array_list_push_front(struct aws_array_list *AWS_RESTRICT list, const void *val);
+
+/**
* Deletes the element at the front of the list if it exists. If list is empty, AWS_ERROR_LIST_EMPTY will be raised.
* This call results in shifting all of the elements at the end of the array to the front. Avoid this call unless that
* is intended behavior.
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl
index d3ca30ecda..4e64a96a66 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl
@@ -162,6 +162,32 @@ int aws_array_list_front(const struct aws_array_list *AWS_RESTRICT list, void *v
}
AWS_STATIC_IMPL
+int aws_array_list_push_front(struct aws_array_list *AWS_RESTRICT list, const void *val) {
+ AWS_PRECONDITION(aws_array_list_is_valid(list));
+ AWS_PRECONDITION(
+ val && AWS_MEM_IS_READABLE(val, list->item_size),
+ "Input pointer [val] must point to readable memory of [list->item_size] bytes.");
+ size_t orig_len = aws_array_list_length(list);
+ int err_code = aws_array_list_ensure_capacity(list, orig_len);
+
+ if (err_code && aws_last_error() == AWS_ERROR_INVALID_INDEX && !list->alloc) {
+ AWS_POSTCONDITION(aws_array_list_is_valid(list));
+ return aws_raise_error(AWS_ERROR_LIST_EXCEEDS_MAX_SIZE);
+ } else if (err_code) {
+ AWS_POSTCONDITION(aws_array_list_is_valid(list));
+ return err_code;
+ }
+ if (orig_len) {
+ memmove((uint8_t *)list->data + list->item_size, list->data, orig_len * list->item_size);
+ }
+ ++list->length;
+ memcpy(list->data, val, list->item_size);
+
+ AWS_POSTCONDITION(aws_array_list_is_valid(list));
+ return err_code;
+}
+
+AWS_STATIC_IMPL
int aws_array_list_pop_front(struct aws_array_list *AWS_RESTRICT list) {
AWS_PRECONDITION(aws_array_list_is_valid(list));
if (aws_array_list_length(list) > 0) {
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/assert.h b/contrib/restricted/aws/aws-c-common/include/aws/common/assert.h
index 7ab9973ef2..e7ce341ce0 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/assert.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/assert.h
@@ -19,6 +19,24 @@ void aws_fatal_assert(const char *cond_str, const char *file, int line) AWS_ATTR
AWS_EXTERN_C_END
#if defined(CBMC)
+# define AWS_PANIC_OOM(mem, msg) \
+ do { \
+ if (!(mem)) { \
+ fprintf(stderr, "%s: %s, line %d", msg, __FILE__, __LINE__); \
+ exit(-1); \
+ } \
+ } while (0)
+#else
+# define AWS_PANIC_OOM(mem, msg) \
+ do { \
+ if (!(mem)) { \
+ fprintf(stderr, "%s", msg); \
+ abort(); \
+ } \
+ } while (0)
+#endif /* defined(CBMC) */
+
+#if defined(CBMC)
# define AWS_ASSUME(cond) __CPROVER_assume(cond)
#elif defined(_MSC_VER)
# define AWS_ASSUME(cond) __assume(cond)
@@ -86,8 +104,8 @@ AWS_EXTERN_C_END
# define AWS_POSTCONDITION1(cond) __CPROVER_assert((cond), # cond " check failed")
# define AWS_FATAL_POSTCONDITION2(cond, explanation) __CPROVER_assert((cond), (explanation))
# define AWS_FATAL_POSTCONDITION1(cond) __CPROVER_assert((cond), # cond " check failed")
-# define AWS_MEM_IS_READABLE(base, len) (((len) == 0) || (__CPROVER_r_ok((base), (len))))
-# define AWS_MEM_IS_WRITABLE(base, len) (((len) == 0) || (__CPROVER_r_ok((base), (len))))
+# define AWS_MEM_IS_READABLE_CHECK(base, len) (((len) == 0) || (__CPROVER_r_ok((base), (len))))
+# define AWS_MEM_IS_WRITABLE_CHECK(base, len) (((len) == 0) || (__CPROVER_r_ok((base), (len))))
#else
# define AWS_PRECONDITION2(cond, expl) AWS_ASSERT(cond)
# define AWS_PRECONDITION1(cond) AWS_ASSERT(cond)
@@ -98,12 +116,22 @@ AWS_EXTERN_C_END
# define AWS_FATAL_POSTCONDITION2(cond, expl) AWS_FATAL_ASSERT(cond)
# define AWS_FATAL_POSTCONDITION1(cond) AWS_FATAL_ASSERT(cond)
/**
+ * These macros should not be used in is_valid functions.
+ * All validate functions are also used in assumptions for CBMC proofs,
+ * which should not contain __CPROVER_*_ok primitives. The use of these primitives
+ * in assumptions may lead to spurious results.
* The C runtime does not give a way to check these properties,
* but we can at least check that the pointer is valid. */
-# define AWS_MEM_IS_READABLE(base, len) (((len) == 0) || (base))
-# define AWS_MEM_IS_WRITABLE(base, len) (((len) == 0) || (base))
+# define AWS_MEM_IS_READABLE_CHECK(base, len) (((len) == 0) || (base))
+# define AWS_MEM_IS_WRITABLE_CHECK(base, len) (((len) == 0) || (base))
#endif /* CBMC */
+/**
+ * These macros can safely be used in validate functions.
+ */
+#define AWS_MEM_IS_READABLE(base, len) (((len) == 0) || (base))
+#define AWS_MEM_IS_WRITABLE(base, len) (((len) == 0) || (base))
+
/* Logical consequence. */
#define AWS_IMPLIES(a, b) (!(a) || (b))
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/bus.h b/contrib/restricted/aws/aws-c-common/include/aws/common/bus.h
new file mode 100644
index 0000000000..fe5127e6f7
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/bus.h
@@ -0,0 +1,97 @@
+#ifndef AWS_COMMON_BUS_H
+#define AWS_COMMON_BUS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/common.h>
+
+/*
+ * A message bus is a mapping of integer message addresses/types -> listeners/callbacks.
+ * A listener can listen to a single message, or to all messages on a bus
+ * Message addresses/types can be any 64-bit integer, starting at 1.
+ * AWS_BUS_ADDRESS_ALL (0xffffffffffffffff) is reserved for broadcast to all listeners.
+ * AWS_BUS_ADDRESS_CLOSE (0) is reserved for notifying listeners to clean up
+ * Listeners will be sent a message of type AWS_BUS_ADDRESS_CLOSE when it is time to clean any state up.
+ * Listeners are owned by the subscriber, and are no longer referenced by the bus once unsubscribed.
+ * Under the AWS_BUS_ASYNC policy, message delivery happens in a separate thread from sending, so listeners are
+ * responsible for their own thread safety.
+ */
+struct aws_bus;
+
+enum aws_bus_policy {
+ /**
+ * Messages will be delivered, even if dynamic allocation is required. Default.
+ */
+ AWS_BUS_ASYNC_RELIABLE = 0x0,
+ /**
+ * Only memory from the bus's internal buffer will be used (if a buffer size is supplied at bus creation time).
+ * If the buffer is full, older buffered messages will be discarded to make room for newer messages.
+ */
+ AWS_BUS_ASYNC_UNRELIABLE = 0x1,
+ /**
+ * Message delivery is immediate, and therefore reliable by definition
+ */
+ AWS_BUS_SYNC_RELIABLE = 0x2,
+};
+
+/**
+ * Subscribing to AWS_BUS_ADDRESS_ALL will cause the listener to be invoked for every message sent to the bus
+ * It is possible to send to AWS_BUS_ADDRESS_ALL, just be aware that this will only send to listeners subscribed
+ * to AWS_BUS_ADDRESS_ALL.
+ */
+#define AWS_BUS_ADDRESS_ALL ((uint64_t)-1)
+#define AWS_BUS_ADDRESS_CLOSE 0
+
+struct aws_bus_options {
+ enum aws_bus_policy policy;
+ /**
+ * Size of buffer for unreliable message delivery queue.
+ * Unused if policy is AWS_BUS_ASYNC_RELIABLE or AWS_BUS_SYNC_RELIABLE
+ * Messages are 40 bytes. Default buffer_size is 4K. The bus will not allocate memory beyond this size.
+ */
+ size_t buffer_size;
+ /* Not supported yet, but event loop group for delivery */
+ struct aws_event_loop_group *event_loop_group;
+};
+
+/* Signature for listener callbacks */
+typedef void(aws_bus_listener_fn)(uint64_t address, const void *payload, void *user_data);
+
+/**
+ * Allocates and initializes a message bus
+ */
+AWS_COMMON_API
+struct aws_bus *aws_bus_new(struct aws_allocator *allocator, const struct aws_bus_options *options);
+
+/**
+ * Cleans up a message bus, including notifying all remaining listeners to close
+ */
+AWS_COMMON_API
+void aws_bus_destroy(struct aws_bus *bus);
+
+/**
+ * Subscribes a listener to a message type. user_data's lifetime is the responsibility of the subscriber.
+ */
+AWS_COMMON_API
+int aws_bus_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data);
+
+/**
+ * Unsubscribe a listener from a specific message. This is only necessary if the listener has lifetime concerns.
+ * Otherwise, the listener will be called with an address of AWS_BUS_ADDRESS_CLOSE, which indicates that user_data
+ * can be cleaned up if necessary and the listener will never be called again.
+ */
+AWS_COMMON_API
+void aws_bus_unsubscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data);
+
+/**
+ * Sends a message to any listeners. payload will live until delivered, and then the destructor (if
+ * provided) will be called. Note that anything payload references must also live at least until it is destroyed.
+ * Will return AWS_OP_ERR if the bus is closing/has been closed
+ */
+AWS_COMMON_API
+int aws_bus_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *));
+
+#endif /* AWS_COMMON_BUS_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h b/contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h
index 8e79a93b27..e6464b4780 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h
@@ -125,6 +125,15 @@ AWS_COMMON_API int aws_byte_buf_init_copy(
const struct aws_byte_buf *src);
/**
+ * Reads 'filename' into 'out_buf'. If successful, 'out_buf' is allocated and filled with the data;
+ * It is your responsibility to call 'aws_byte_buf_clean_up()' on it. Otherwise, 'out_buf' remains
+ * unused. In the very unfortunate case where some API needs to treat out_buf as a c_string, a null terminator
+ * is appended, but is not included as part of the length field.
+ */
+AWS_COMMON_API
+int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename);
+
+/**
* Evaluates the set of properties that define the shape of all valid aws_byte_buf structures.
* It is also a cheap check, in the sense it run in constant time (i.e., no loops or recursion).
*/
@@ -496,6 +505,20 @@ AWS_COMMON_API
bool aws_byte_cursor_eq_c_str_ignore_case(const struct aws_byte_cursor *const cursor, const char *const c_str);
/**
+ * Return true if the input starts with the prefix (exact byte comparison).
+ */
+AWS_COMMON_API
+bool aws_byte_cursor_starts_with(const struct aws_byte_cursor *input, const struct aws_byte_cursor *prefix);
+
+/**
+ * Return true if the input starts with the prefix (case-insensitive).
+ * The "C" locale is used for comparing upper and lowercase letters.
+ * Data is assumed to be ASCII text, UTF-8 will work fine too.
+ */
+AWS_COMMON_API
+bool aws_byte_cursor_starts_with_ignore_case(const struct aws_byte_cursor *input, const struct aws_byte_cursor *prefix);
+
+/**
* Case-insensitive hash function for array containing ASCII or UTF-8 text.
*/
AWS_COMMON_API
@@ -873,6 +896,46 @@ AWS_COMMON_API bool aws_isxdigit(uint8_t ch);
*/
AWS_COMMON_API bool aws_isspace(uint8_t ch);
+/**
+ * Read entire cursor as ASCII/UTF-8 unsigned base-10 number.
+ * Stricter than strtoull(), which allows whitespace and inputs that start with "0x"
+ *
+ * Examples:
+ * "0" -> 0
+ * "123" -> 123
+ * "00004" -> 4 // leading zeros ok
+ *
+ * Rejects things like:
+ * "-1" // negative numbers not allowed
+ * "1,000" // only characters 0-9 allowed
+ * "" // blank string not allowed
+ * " 0 " // whitespace not allowed
+ * "0x0" // hex not allowed
+ * "FF" // hex not allowed
+ * "999999999999999999999999999999999999999999" // larger than max u64
+ */
+AWS_COMMON_API
+int aws_byte_cursor_utf8_parse_u64(struct aws_byte_cursor cursor, uint64_t *dst);
+
+/**
+ * Read entire cursor as ASCII/UTF-8 unsigned base-16 number with NO "0x" prefix.
+ *
+ * Examples:
+ * "F" -> 15
+ * "000000ff" -> 255 // leading zeros ok
+ * "Ff" -> 255 // mixed case ok
+ * "123" -> 291
+ * "FFFFFFFFFFFFFFFF" -> 18446744073709551615 // max u64
+ *
+ * Rejects things like:
+ * "0x0" // 0x prefix not allowed
+ * "" // blank string not allowed
+ * " F " // whitespace not allowed
+ * "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" // larger than max u64
+ */
+AWS_COMMON_API
+int aws_byte_cursor_utf8_parse_u64_hex(struct aws_byte_cursor cursor, uint64_t *dst);
+
AWS_EXTERN_C_END
#endif /* AWS_COMMON_BYTE_BUF_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/byte_order.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/byte_order.inl
index 2ba777909c..1204be06a1 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/byte_order.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/byte_order.inl
@@ -9,7 +9,7 @@
#include <aws/common/byte_order.h>
#include <aws/common/common.h>
-#ifdef _MSC_VER
+#ifdef _WIN32
# include <stdlib.h>
#else
# include <netinet/in.h>
@@ -59,7 +59,7 @@ AWS_STATIC_IMPL uint64_t aws_ntoh64(uint64_t x) {
* Convert 32 bit integer from host to network byte order.
*/
AWS_STATIC_IMPL uint32_t aws_hton32(uint32_t x) {
-#ifdef _MSC_VER
+#ifdef _WIN32
return aws_is_big_endian() ? x : _byteswap_ulong(x);
#else
return htonl(x);
@@ -116,7 +116,7 @@ AWS_STATIC_IMPL double aws_htonf64(double x) {
* Convert 32 bit integer from network to host byte order.
*/
AWS_STATIC_IMPL uint32_t aws_ntoh32(uint32_t x) {
-#ifdef _MSC_VER
+#ifdef _WIN32
return aws_is_big_endian() ? x : _byteswap_ulong(x);
#else
return ntohl(x);
@@ -141,7 +141,7 @@ AWS_STATIC_IMPL double aws_ntohf64(double x) {
* Convert 16 bit integer from host to network byte order.
*/
AWS_STATIC_IMPL uint16_t aws_hton16(uint16_t x) {
-#ifdef _MSC_VER
+#ifdef _WIN32
return aws_is_big_endian() ? x : _byteswap_ushort(x);
#else
return htons(x);
@@ -152,7 +152,7 @@ AWS_STATIC_IMPL uint16_t aws_hton16(uint16_t x) {
* Convert 16 bit integer from network to host byte order.
*/
AWS_STATIC_IMPL uint16_t aws_ntoh16(uint16_t x) {
-#ifdef _MSC_VER
+#ifdef _WIN32
return aws_is_big_endian() ? x : _byteswap_ushort(x);
#else
return ntohs(x);
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/clock.h b/contrib/restricted/aws/aws-c-common/include/aws/common/clock.h
index 489a5f19a1..68b202f1bd 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/clock.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/clock.h
@@ -32,6 +32,12 @@ AWS_STATIC_IMPL uint64_t aws_timestamp_convert(
uint64_t *remainder);
/**
+ * More general form of aws_timestamp_convert that takes arbitrary frequencies rather than the timestamp enum.
+ */
+AWS_STATIC_IMPL uint64_t
+ aws_timestamp_convert_u64(uint64_t ticks, uint64_t old_frequency, uint64_t new_frequency, uint64_t *remainder);
+
+/**
* Get ticks in nanoseconds (usually 100 nanosecond precision) on the high resolution clock (most-likely TSC). This
* clock has no bearing on the actual system time. On success, timestamp will be set.
*/
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/clock.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/clock.inl
index d741a43419..d0a1b8c253 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/clock.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/clock.inl
@@ -15,31 +15,75 @@ AWS_EXTERN_C_BEGIN
/**
* Converts 'timestamp' from unit 'convert_from' to unit 'convert_to', if the units are the same then 'timestamp' is
* returned. If 'remainder' is NOT NULL, it will be set to the remainder if convert_from is a more precise unit than
- * convert_to. To avoid unnecessary branching, 'remainder' is not zero initialized in this function, be sure to set it
- * to 0 first if you care about that kind of thing. If conversion would lead to integer overflow, the timestamp
- * returned will be the highest possible time that is representable, i.e. UINT64_MAX.
+ * convert_to (but only if the old frequency is a multiple of the new one). If conversion would lead to integer
+ * overflow, the timestamp returned will be the highest possible time that is representable, i.e. UINT64_MAX.
*/
+AWS_STATIC_IMPL uint64_t
+ aws_timestamp_convert_u64(uint64_t ticks, uint64_t old_frequency, uint64_t new_frequency, uint64_t *remainder) {
+
+ AWS_FATAL_ASSERT(old_frequency > 0 && new_frequency > 0);
+
+ /*
+ * The remainder, as defined in the contract of the original version of this function, only makes mathematical
+ * sense when the old frequency is a positive multiple of the new frequency. The new convert function needs to be
+ * backwards compatible with the old version's remainder while being a lot more accurate with its conversions
+ * in order to handle extreme edge cases of large numbers.
+ */
+ if (remainder != NULL) {
+ *remainder = 0;
+ /* only calculate remainder when going from a higher to lower frequency */
+ if (new_frequency < old_frequency) {
+ uint64_t frequency_remainder = old_frequency % new_frequency;
+ /* only calculate remainder when the old frequency is evenly divisible by the new one */
+ if (frequency_remainder == 0) {
+ uint64_t frequency_ratio = old_frequency / new_frequency;
+ *remainder = ticks % frequency_ratio;
+ }
+ }
+ }
+
+ /*
+ * Now do the actual conversion.
+ */
+ uint64_t old_seconds_elapsed = ticks / old_frequency;
+ uint64_t old_remainder = ticks - old_seconds_elapsed * old_frequency;
+
+ uint64_t new_ticks_whole_part = aws_mul_u64_saturating(old_seconds_elapsed, new_frequency);
+
+ /*
+ * This could be done in one of three ways:
+ *
+ * (1) (old_remainder / old_frequency) * new_frequency - this would be completely wrong since we know that
+ * (old_remainder / old_frequency) < 1 = 0
+ *
+ * (2) old_remainder * (new_frequency / old_frequency) - this only gives a good solution when new_frequency is
+ * a multiple of old_frequency
+ *
+ * (3) (old_remainder * new_frequency) / old_frequency - this is how we do it below, the primary concern is if
+ * the initial multiplication can overflow. For that to be the case, we would need to be using old and new
+ * frequencies in the billions. This does not appear to be the case in any current machine's hardware counters.
+ *
+ * Ignoring arbitrary frequencies, even a nanosecond to nanosecond conversion would not overflow either.
+ *
+ * If this did become an issue, we would potentially need to use intrinsics/platform support for 128 bit math.
+ *
+ * For review consideration:
+ * (1) should we special case frequencies being a multiple of the other?
+ * (2) should we special case frequencies being the same? A ns-to-ns conversion does the full math and
+ * approaches overflow (but cannot actually do so).
+ */
+ uint64_t new_ticks_remainder_part = aws_mul_u64_saturating(old_remainder, new_frequency) / old_frequency;
+
+ return aws_add_u64_saturating(new_ticks_whole_part, new_ticks_remainder_part);
+}
+
AWS_STATIC_IMPL uint64_t aws_timestamp_convert(
uint64_t timestamp,
enum aws_timestamp_unit convert_from,
enum aws_timestamp_unit convert_to,
uint64_t *remainder) {
- uint64_t diff = 0;
-
- if (convert_to > convert_from) {
- diff = convert_to / convert_from;
- return aws_mul_u64_saturating(timestamp, diff);
- } else if (convert_to < convert_from) {
- diff = convert_from / convert_to;
- if (remainder) {
- *remainder = timestamp % diff;
- }
-
- return timestamp / diff;
- } else {
- return timestamp;
- }
+ return aws_timestamp_convert_u64(timestamp, convert_from, convert_to, remainder);
}
AWS_EXTERN_C_END
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h b/contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h
index 8b31ae98ef..7184dcd68a 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h
@@ -12,6 +12,21 @@ enum aws_cli_options_has_arg {
AWS_CLI_OPTIONS_OPTIONAL_ARGUMENT = 2,
};
+/**
+ * Invoked when a subcommand is encountered. argc and argv[] begin at the command encountered.
+ * command_name is the name of the command being handled.
+ */
+typedef int(aws_cli_options_subcommand_fn)(int argc, char *const argv[], const char *command_name, void *user_data);
+
+/**
+ * Dispatch table to dispatch cli commands from.
+ * command_name should be the exact string for the command you want to handle from the command line.
+ */
+struct aws_cli_subcommand_dispatch {
+ aws_cli_options_subcommand_fn *subcommand_fn;
+ const char *command_name;
+};
+
/* Ignoring padding since we're trying to maintain getopt.h compatibility */
/* NOLINTNEXTLINE(clang-analyzer-optin.performance.Padding) */
struct aws_cli_option {
@@ -34,6 +49,11 @@ AWS_COMMON_API extern int aws_cli_optind;
AWS_COMMON_API extern const char *aws_cli_optarg;
/**
+ * If 0x02 was returned by aws_cli_getopt_long(), this value will be set to the argument encountered.
+ */
+AWS_COMMON_API extern const char *aws_cli_positional_arg;
+
+/**
* A mostly compliant implementation of posix getopt_long(). Parses command-line arguments. argc is the number of
* command line arguments passed in argv. optstring contains the legitimate option characters. The option characters
* coorespond to aws_cli_option::val. If the character is followed by a :, the option requires an argument. If it is
@@ -45,7 +65,8 @@ AWS_COMMON_API extern const char *aws_cli_optarg;
* If longindex is non-null, it will be set to the index in longopts, for the found option.
*
* Returns option val if it was found, '?' if an option was encountered that was not specified in the option string,
- * returns -1 when all arguments that can be parsed have been parsed.
+ * 0x02 (START_OF_TEXT) will be returned if a positional argument was encountered. Returns -1 when all arguments that
+ * can be parsed have been parsed.
*/
AWS_COMMON_API int aws_cli_getopt_long(
int argc,
@@ -53,6 +74,32 @@ AWS_COMMON_API int aws_cli_getopt_long(
const char *optstring,
const struct aws_cli_option *longopts,
int *longindex);
+
+/**
+ * Resets global parser state for use in another parser run for the application.
+ */
+AWS_COMMON_API void aws_cli_reset_state(void);
+
+/**
+ * Dispatches the current command line arguments with a subcommand from the second input argument in argv[], if
+ * dispatch table contains a command that matches the argument. When the command is dispatched, argc and argv will be
+ * updated to reflect the new argument count. The cli options are required to come after the subcommand. If either no
+ * dispatch was found or no argument was passed to the program, this function will return AWS_OP_ERR. Check
+ * aws_last_error() for details on the error.
+ * @param argc number of arguments passed to int main()
+ * @param argv the arguments passed to int main()
+ * @param parse_cb, optional, specify NULL if you don't want to handle this. This argument is for parsing "meta"
+ * commands from the command line options prior to dispatch occurring.
+ * @param dispatch_table table containing functions and command name to dispatch on.
+ * @param table_length number of entries in dispatch_table.
+ * @return AWS_OP_SUCCESS(0) on success, AWS_OP_ERR(-1) on failure
+ */
+AWS_COMMON_API int aws_cli_dispatch_on_subcommand(
+ int argc,
+ char *const argv[],
+ struct aws_cli_subcommand_dispatch *dispatch_table,
+ int table_length,
+ void *user_data);
AWS_EXTERN_C_END
#endif /* AWS_COMMON_COMMAND_LINE_PARSER_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h b/contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h
index 5522c4fae5..40d83f864f 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h
@@ -48,6 +48,7 @@ enum aws_date_day_of_week {
struct aws_date_time {
time_t timestamp;
+ uint16_t milliseconds;
char tz[6];
struct tm gmt_time;
struct tm local_time;
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/error.h b/contrib/restricted/aws/aws-c-common/include/aws/common/error.h
index 200de33146..f12fc730b9 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/error.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/error.h
@@ -143,6 +143,7 @@ AWS_EXTERN_C_END
enum aws_common_error {
AWS_ERROR_SUCCESS = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_COMMON_PACKAGE_ID),
AWS_ERROR_OOM,
+ AWS_ERROR_NO_SPACE,
AWS_ERROR_UNKNOWN,
AWS_ERROR_SHORT_BUFFER,
AWS_ERROR_OVERFLOW_DETECTED,
@@ -190,7 +191,10 @@ enum aws_common_error {
AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED,
AWS_ERROR_STRING_MATCH_NOT_FOUND,
AWS_ERROR_DIVIDE_BY_ZERO,
-
+ AWS_ERROR_INVALID_FILE_HANDLE,
+ AWS_ERROR_OPERATION_INTERUPTED,
+ AWS_ERROR_DIRECTORY_NOT_EMPTY,
+ AWS_ERROR_PLATFORM_NOT_SUPPORTED,
AWS_ERROR_END_COMMON_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_COMMON_PACKAGE_ID)
};
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h b/contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h
new file mode 100644
index 0000000000..3210e8ab37
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h
@@ -0,0 +1,302 @@
+/*
+Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+/*
+ * This file has been modified from its original version by Amazon:
+ * (1) Address clang-tidy errors by renaming function parameters in a number of places
+ * to match their .c counterparts.
+ * (2) Misc tweaks to unchecked writes to make security static analysis happier
+ */
+
+/* clang-format off */
+
+#ifndef AWS_COMMON_EXTERNAL_CJSON_H // NOLINT
+#define AWS_COMMON_EXTERNAL_CJSON_H // NOLINT
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32))
+#define __WINDOWS__
+#endif
+
+#ifdef __WINDOWS__
+
+/* When compiling for windows, we specify a specific calling convention to avoid issues where we are being called from a project with a different default calling convention. For windows you have 3 define options:
+
+CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever dllexport symbols
+CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport symbols (default)
+CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbol
+
+For *nix builds that support visibility attribute, you can define similar behavior by
+
+setting default visibility to hidden by adding
+-fvisibility=hidden (for gcc)
+or
+-xldscope=hidden (for sun cc)
+to CFLAGS
+
+then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJSON_EXPORT_SYMBOLS does
+
+*/
+
+#define CJSON_CDECL __cdecl
+#define CJSON_STDCALL __stdcall
+
+/* export symbols by default, this is necessary for copy pasting the C and header file */
+#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && !defined(CJSON_EXPORT_SYMBOLS)
+#define CJSON_EXPORT_SYMBOLS
+#endif
+
+#if defined(CJSON_HIDE_SYMBOLS)
+#define CJSON_PUBLIC(type) type CJSON_STDCALL
+#elif defined(CJSON_EXPORT_SYMBOLS)
+#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL
+#elif defined(CJSON_IMPORT_SYMBOLS)
+#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL
+#endif
+#else /* !__WINDOWS__ */
+#define CJSON_CDECL
+#define CJSON_STDCALL
+
+#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(CJSON_API_VISIBILITY)
+#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type
+#else
+#define CJSON_PUBLIC(type) type
+#endif
+#endif
+
+/* project version */
+#define CJSON_VERSION_MAJOR 1
+#define CJSON_VERSION_MINOR 7
+#define CJSON_VERSION_PATCH 15
+
+#include <stddef.h>
+
+/* cJSON Types: */
+#define cJSON_Invalid (0)
+#define cJSON_False (1 << 0)
+#define cJSON_True (1 << 1)
+#define cJSON_NULL (1 << 2)
+#define cJSON_Number (1 << 3)
+#define cJSON_String (1 << 4)
+#define cJSON_Array (1 << 5)
+#define cJSON_Object (1 << 6)
+#define cJSON_Raw (1 << 7) /* raw json */
+
+#define cJSON_IsReference 256
+#define cJSON_StringIsConst 512
+
+/* The cJSON structure: */
+typedef struct cJSON
+{
+ /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */
+ struct cJSON *next;
+ struct cJSON *prev;
+ /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */
+ struct cJSON *child;
+
+ /* The type of the item, as above. */
+ int type;
+
+ /* The item's string, if type==cJSON_String and type == cJSON_Raw */
+ char *valuestring;
+ /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */
+ int valueint;
+ /* The item's number, if type==cJSON_Number */
+ double valuedouble;
+
+ /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */
+ char *string;
+} cJSON;
+
+typedef struct cJSON_Hooks
+{
+ /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. */
+ void *(CJSON_CDECL *malloc_fn)(size_t sz); // NOLINT
+ void (CJSON_CDECL *free_fn)(void *ptr);
+} cJSON_Hooks;
+
+typedef int cJSON_bool;
+
+/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse them.
+* This is to prevent stack overflows. */
+#ifndef CJSON_NESTING_LIMIT
+#define CJSON_NESTING_LIMIT 1000
+#endif
+
+/* returns the version of cJSON as a string */
+CJSON_PUBLIC(const char*) cJSON_Version(void);
+
+/* Supply malloc, realloc and free functions to cJSON */
+CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks);
+
+/* Memory Management: the caller is always responsible to free the results from all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is cJSON_PrintPreallocated, where the caller has full responsibility of the buffer. */
+/* Supply a block of JSON, and this returns a cJSON object you can interrogate. */
+CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value);
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length);
+/* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */
+/* If you supply a ptr in return_parse_end and parsing fails, then return_parse_end will contain a pointer to the error so will match cJSON_GetErrorPtr(). */
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated);
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated);
+
+/* Render a cJSON entity to text for transfer/storage. */
+CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item);
+/* Render a cJSON entity to text for transfer/storage without any formatting. */
+CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item);
+/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. guessing well reduces reallocation. fmt=0 gives unformatted, =1 gives formatted */
+CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt);
+/* Render a cJSON entity to text using a buffer already allocated in memory with given length. Returns 1 on success and 0 on failure. */
+/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will use, so to be safe allocate 5 bytes more than you actually need */
+CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format);
+/* Delete a cJSON entity and all subentities. */
+CJSON_PUBLIC(void) cJSON_Delete(cJSON *item);
+
+/* Returns the number of items in an array (or object). */
+CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array);
+/* Retrieve item number "index" from array "array". Returns NULL if unsuccessful. */
+CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index);
+/* Get item "string" from object. Case insensitive. */
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string);
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string);
+CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string);
+/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */
+CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void);
+
+/* Check item type and return its value */
+CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item);
+CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item);
+
+/* These functions check the type of an item */
+CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item);
+
+/* These calls create a cJSON item of the appropriate type. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean);
+CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num);
+CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string);
+/* raw json */
+CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw);
+CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void);
+
+/* Create a string where valuestring references a string so
+* it will not be freed by cJSON_Delete */
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string);
+/* Create an object/array that only references its elements so
+* they will not be freed by cJSON_Delete */
+CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child);
+CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child);
+
+/* These utilities create an Array of count items.
+* The parameter count cannot be greater than the number of elements in the number array, otherwise array access will be out of bounds.*/
+CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count);
+
+/* Append item to the specified array/object. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item);
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item);
+/* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object.
+* WARNING: When this function was used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before
+* writing to `item->string` */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item);
+/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item);
+
+/* Remove/Detach items from Arrays/Objects. */
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string);
+
+/* Update array items. */
+CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. */
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement);
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,const char *string,cJSON *newitem);
+
+/* Duplicate a cJSON item */
+CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse);
+/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will
+* need to be released. With recurse!=0, it will duplicate any children connected to the item.
+* The item->next and ->prev pointers are always zero on return from Duplicate. */
+/* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal.
+* case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */
+CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive); // NOLINT
+
+/* Minify a string: remove blank characters (such as ' ', '\t', '\r', '\n') from it.
+* The input pointer json cannot point to a read-only address area, such as a string constant,
+* but should point to a readable and writable address area. */
+CJSON_PUBLIC(void) cJSON_Minify(char *json);
+
+/* Helper functions for creating and adding items to an object at the same time.
+* They return the added item or NULL on failure. */
+CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean);
+CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number);
+CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string);
+CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw);
+CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name);
+
+/* When assigning an integer value, it needs to be propagated to valuedouble too. */
+#define cJSON_SetIntValue(object, number) ((object) ? (object)->valueint = (object)->valuedouble = (number) : (number)) //NOLINT
+/* helper for the cJSON_SetNumberValue macro */
+CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number);
+#define cJSON_SetNumberValue(object, number) ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) : (number)) //NOLINT
+/* Change the valuestring of a cJSON_String object, only takes effect when type of object is cJSON_String */
+CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring);
+
+/* Macro for iterating over an array or object */
+#define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? (array)->child : NULL; element != NULL; element = element->next) //NOLINT
+
+/* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */
+CJSON_PUBLIC(void *) cJSON_malloc(size_t size);
+CJSON_PUBLIC(void) cJSON_free(void *object);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/file.h b/contrib/restricted/aws/aws-c-common/include/aws/common/file.h
new file mode 100644
index 0000000000..4bbc1540db
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/file.h
@@ -0,0 +1,198 @@
+#ifndef AWS_COMMON_FILE_H
+#define AWS_COMMON_FILE_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/byte_buf.h>
+#include <aws/common/common.h>
+#include <aws/common/platform.h>
+#include <stdio.h>
+
+#ifdef AWS_OS_WINDOWS
+# define AWS_PATH_DELIM '\\'
+# define AWS_PATH_DELIM_STR "\\"
+#else
+# define AWS_PATH_DELIM '/'
+# define AWS_PATH_DELIM_STR "/"
+#endif
+
+struct aws_string;
+struct aws_directory_iterator;
+
+enum aws_file_type {
+ AWS_FILE_TYPE_FILE = 1,
+ AWS_FILE_TYPE_SYM_LINK = 2,
+ AWS_FILE_TYPE_DIRECTORY = 4,
+};
+
+struct aws_directory_entry {
+ /**
+ * Absolute path to the entry from the current process root.
+ */
+ struct aws_byte_cursor path;
+ /**
+ * Path to the entry relative to the current working directory.
+ */
+ struct aws_byte_cursor relative_path;
+ /**
+ * Bit-field of enum aws_file_type
+ */
+ int file_type;
+ /**
+ * Size of the file on disk.
+ */
+ int64_t file_size;
+};
+
+/**
+ * Invoked during calls to aws_directory_traverse() as an entry is encountered. entry will contain
+ * the parsed directory entry info.
+ *
+ * Return true to continue the traversal, or alternatively, if you have a reason to abort the traversal, return false.
+ */
+typedef bool(aws_on_directory_entry)(const struct aws_directory_entry *entry, void *user_data);
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Don't use this. It never should have been added in the first place. It's now deprecated.
+ */
+AWS_COMMON_API FILE *aws_fopen(const char *file_path, const char *mode);
+
+/**
+ * Opens file at file_path using mode. Returns the FILE pointer if successful.
+ */
+AWS_COMMON_API FILE *aws_fopen_safe(const struct aws_string *file_path, const struct aws_string *mode);
+
+/**
+ * Creates a directory if it doesn't currently exist. If the directory already exists, it's ignored and assumed
+ * successful.
+ *
+ * Returns AWS_OP_SUCCESS on success. Otherwise, check aws_last_error().
+ */
+AWS_COMMON_API int aws_directory_create(const struct aws_string *dir_path);
+/**
+ * Returns true if the directory currently exists. Otherwise, it returns false.
+ */
+AWS_COMMON_API bool aws_directory_exists(const struct aws_string *dir_path);
+/**
+ * Deletes a directory. If the directory is not empty, this will fail unless the recursive parameter is set to true.
+ * If recursive is true then the entire directory and all of its contents will be deleted. If it is set to false,
+ * the directory will be deleted only if it is empty. Returns AWS_OP_SUCCESS if the operation was successful. Otherwise,
+ * aws_last_error() will contain the error that occurred. If the directory doesn't exist, AWS_OP_SUCCESS is still
+ * returned.
+ */
+AWS_COMMON_API int aws_directory_delete(const struct aws_string *dir_path, bool recursive);
+/**
+ * Deletes a file. Returns AWS_OP_SUCCESS if the operation was successful. Otherwise,
+ * aws_last_error() will contain the error that occurred. If the file doesn't exist, AWS_OP_SUCCESS is still returned.
+ */
+AWS_COMMON_API int aws_file_delete(const struct aws_string *file_path);
+
+/**
+ * Moves the file or directory at "from" to the location "to".
+ * Returns AWS_OP_SUCCESS if the operation was successful. Otherwise,
+ * aws_last_error() will contain the error that occurred.
+ */
+AWS_COMMON_API int aws_directory_or_file_move(const struct aws_string *from, const struct aws_string *to);
+
+/**
+ * Traverse a directory starting at path.
+ *
+ * If you want the traversal to recurse the entire directory, pass recursive as true. Passing false for this parameter
+ * will only iterate the contents of the directory, but will not descend into any directories it encounters.
+ *
+ * If recursive is set to true, the traversal is performed post-order, depth-first
+ * (for practical reasons such as deleting a directory that contains subdirectories or files).
+ *
+ * returns AWS_OP_SUCCESS(0) on success.
+ */
+AWS_COMMON_API int aws_directory_traverse(
+ struct aws_allocator *allocator,
+ const struct aws_string *path,
+ bool recursive,
+ aws_on_directory_entry *on_entry,
+ void *user_data);
+
+/**
+ * Creates a read-only iterator of a directory starting at path. If path is invalid or there's any other error
+ * condition, NULL will be returned. Call aws_last_error() for the exact error in that case.
+ */
+AWS_COMMON_API struct aws_directory_iterator *aws_directory_entry_iterator_new(
+ struct aws_allocator *allocator,
+ const struct aws_string *path);
+
+/**
+ * Moves the iterator to the next entry. Returns AWS_OP_SUCCESS if another entry is available, or AWS_OP_ERR with
+ * AWS_ERROR_LIST_EMPTY as the value for aws_last_error() if no more entries are available.
+ */
+AWS_COMMON_API int aws_directory_entry_iterator_next(struct aws_directory_iterator *iterator);
+
+/**
+ * Moves the iterator to the previous entry. Returns AWS_OP_SUCCESS if another entry is available, or AWS_OP_ERR with
+ * AWS_ERROR_LIST_EMPTY as the value for aws_last_error() if no more entries are available.
+ */
+AWS_COMMON_API int aws_directory_entry_iterator_previous(struct aws_directory_iterator *iterator);
+
+/**
+ * Cleanup and deallocate iterator
+ */
+AWS_COMMON_API void aws_directory_entry_iterator_destroy(struct aws_directory_iterator *iterator);
+
+/**
+ * Gets the aws_directory_entry value for iterator at the current position. Returns NULL if the iterator contains no
+ * entries.
+ */
+AWS_COMMON_API const struct aws_directory_entry *aws_directory_entry_iterator_get_value(
+ const struct aws_directory_iterator *iterator);
+
+/**
+ * Returns true iff the character is a directory separator on ANY supported platform.
+ */
+AWS_COMMON_API
+bool aws_is_any_directory_separator(char value);
+
+/**
+ * Returns the directory separator used by the local platform
+ */
+AWS_COMMON_API
+char aws_get_platform_directory_separator(void);
+
+/**
+ * Returns the current user's home directory.
+ */
+AWS_COMMON_API
+struct aws_string *aws_get_home_directory(struct aws_allocator *allocator);
+
+/**
+ * Returns true if a file or path exists, otherwise, false.
+ */
+AWS_COMMON_API
+bool aws_path_exists(const struct aws_string *path);
+
+/*
+ * Wrapper for highest-resolution platform-dependent seek implementation.
+ * Maps to:
+ *
+ * _fseeki64() on windows
+ * fseeko() on linux
+ *
+ * whence can either be SEEK_SET or SEEK_END
+ */
+AWS_COMMON_API
+int aws_fseek(FILE *file, int64_t offset, int whence);
+
+/*
+ * Wrapper for os-specific file length query. We can't use fseek(END, 0)
+ * because support for it is not technically required.
+ *
+ * Unix flavors call fstat, while Windows variants use GetFileSize on a
+ * HANDLE queried from the libc FILE pointer.
+ */
+AWS_COMMON_API
+int aws_file_get_length(FILE *file, int64_t *length);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_COMMON_FILE_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h b/contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h
index c4ac55cb64..8135a15495 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h
@@ -12,6 +12,7 @@
#define AWS_COMMON_HASH_TABLE_ITER_CONTINUE (1 << 0)
#define AWS_COMMON_HASH_TABLE_ITER_DELETE (1 << 1)
+#define AWS_COMMON_HASH_TABLE_ITER_ERROR (1 << 2)
/**
* Hash table data structure. This module provides an automatically resizing
@@ -321,6 +322,10 @@ int aws_hash_table_remove_element(struct aws_hash_table *map, struct aws_hash_el
* element (if not set, iteration stops)
* # AWS_COMMON_HASH_TABLE_ITER_DELETE - Deletes the current value and
* continues iteration. destroy_fn will NOT be invoked.
+ * # AWS_COMMON_HASH_TABLE_ITER_ERROR - Stops iteration with an error.
+ * No action is taken for the current element, and nothing is rolled
+ * back: elements deleted earlier in the iteration stay deleted.
+ * aws_hash_table_foreach returns AWS_OP_ERR after stopping the iteration.
*
* Invoking any method which may change the contents of the hashtable
* during iteration results in undefined behavior. However, you may safely
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/json.h b/contrib/restricted/aws/aws-c-common/include/aws/common/json.h
new file mode 100644
index 0000000000..5182bbf132
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/json.h
@@ -0,0 +1,348 @@
+#ifndef AWS_COMMON_JSON_H
+#define AWS_COMMON_JSON_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/common.h>
+
+struct aws_json_value;
+
+// ====================
+// Create and pass type
+
+/**
+ * Creates a new string aws_json_value with the given string and returns a pointer to it.
+ *
+ * Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or
+ * on the object/array containing the aws_json_value.
+ * @param string A byte pointer to the string you want to store in the aws_json_value
+ * @param allocator The allocator to use when creating the value
+ * @return A new string aws_json_value
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_string(struct aws_allocator *allocator, struct aws_byte_cursor string);
+
+/**
+ * Creates a new number aws_json_value with the given number and returns a pointer to it.
+ *
+ * Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or
+ * on the object/array containing the aws_json_value.
+ * @param number The number you want to store in the aws_json_value
+ * @param allocator The allocator to use when creating the value
+ * @return A new number aws_json_value
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_number(struct aws_allocator *allocator, double number);
+
+/**
+ * Creates a new array aws_json_value and returns a pointer to it.
+ *
+ * Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or
+ * on the object/array containing the aws_json_value.
+ * Deleting this array will also destroy any aws_json_values it contains.
+ * @param allocator The allocator to use when creating the value
+ * @return A new array aws_json_value
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_array(struct aws_allocator *allocator);
+
+/**
+ * Creates a new boolean aws_json_value with the given boolean and returns a pointer to it.
+ *
+ * Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or
+ * on the object/array containing the aws_json_value.
+ * @param boolean The boolean you want to store in the aws_json_value
+ * @param allocator The allocator to use when creating the value
+ * @return A new boolean aws_json_value
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_boolean(struct aws_allocator *allocator, bool boolean);
+
+/**
+ * Creates a new null aws_json_value and returns a pointer to it.
+ *
+ * Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or
+ * on the object/array containing the aws_json_value.
+ * @param allocator The allocator to use when creating the value
+ * @return A new null aws_json_value
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_null(struct aws_allocator *allocator);
+
+/**
+ * Creates a new object aws_json_value and returns a pointer to it.
+ *
+ * Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or
+ * on the object/array containing the aws_json_value.
+ * Deleting this object will also destroy any aws_json_values it contains.
+ * @param allocator The allocator to use when creating the value
+ * @return A new object aws_json_value
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_object(struct aws_allocator *allocator);
+// ====================
+
+// ====================
+// Value getters
+
+/**
+ * Gets the string of a string aws_json_value.
+ * @param value The string aws_json_value.
+ * @param output The string
+ * @return AWS_OP_SUCCESS if the value is a string, otherwise AWS_OP_ERR.
+ */
+AWS_COMMON_API
+int aws_json_value_get_string(const struct aws_json_value *value, struct aws_byte_cursor *output);
+
+/**
+ * Gets the number of a number aws_json_value.
+ * @param value The number aws_json_value.
+ * @param output The number
+ * @return AWS_OP_SUCCESS if the value is a number, otherwise AWS_OP_ERR.
+ */
+AWS_COMMON_API
+int aws_json_value_get_number(const struct aws_json_value *value, double *output);
+
+/**
+ * Gets the boolean of a boolean aws_json_value.
+ * @param value The boolean aws_json_value.
+ * @param output The boolean
+ * @return AWS_OP_SUCCESS if the value is a boolean, otherwise AWS_OP_ERR.
+ */
+AWS_COMMON_API
+int aws_json_value_get_boolean(const struct aws_json_value *value, bool *output);
+// ====================
+
+// ====================
+// Object API
+
+/**
+ * Adds an aws_json_value to an object aws_json_value.
+ *
+ * Note that the aws_json_value will be destroyed when the aws_json_value object is destroyed
+ * by calling "aws_json_destroy()"
+ * @param object The object aws_json_value you want to add a value to.
+ * @param key The key to add the aws_json_value at.
+ * @param value The aws_json_value you want to add.
+ * @return AWS_OP_SUCCESS if adding was successful.
+ * Will return AWS_OP_ERR if the object passed is invalid or if the passed key
+ * is already in use in the object.
+ */
+AWS_COMMON_API
+int aws_json_value_add_to_object(
+ struct aws_json_value *object,
+ struct aws_byte_cursor key,
+ struct aws_json_value *value);
+
+/**
+ * Returns the aws_json_value at the given key.
+ * @param object The object aws_json_value you want to get the value from.
+ * @param key The key that the aws_json_value is at. Is case sensitive.
+ * @return The aws_json_value at the given key, otherwise NULL.
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_get_from_object(const struct aws_json_value *object, struct aws_byte_cursor key);
+
+/**
+ * Checks if there is an aws_json_value at the given key.
+ * @param object The object aws_json_value you want to check a key in.
+ * @param key The key that you want to check. Is case sensitive.
+ * @return True if an aws_json_value is found.
+ */
+AWS_COMMON_API
+bool aws_json_value_has_key(const struct aws_json_value *object, struct aws_byte_cursor key);
+
+/**
+ * Removes the aws_json_value at the given key.
+ * @param object The object aws_json_value you want to remove an aws_json_value from.
+ * @param key The key that the aws_json_value is at. Is case sensitive.
+ * @return AWS_OP_SUCCESS if the aws_json_value was removed.
+ * Will return AWS_OP_ERR if the object passed is invalid or if the value
+ * at the key cannot be found.
+ */
+AWS_COMMON_API
+int aws_json_value_remove_from_object(struct aws_json_value *object, struct aws_byte_cursor key);
+// ====================
+
+// ====================
+// Array API
+
+/**
+ * Adds an aws_json_value to the given array aws_json_value.
+ *
+ * Note that the aws_json_value will be destroyed when the aws_json_value array is destroyed
+ * by calling "aws_json_destroy()"
+ * @param array The array aws_json_value you want to add an aws_json_value to.
+ * @param value The aws_json_value you want to add.
+ * @return AWS_OP_SUCCESS if adding the aws_json_value was successful.
+ * Will return AWS_OP_ERR if the array passed is invalid.
+ */
+AWS_COMMON_API
+int aws_json_value_add_array_element(struct aws_json_value *array, const struct aws_json_value *value);
+
+/**
+ * Returns the aws_json_value at the given index in the array aws_json_value.
+ * @param array The array aws_json_value.
+ * @param index The index of the aws_json_value you want to access.
+ * @return A pointer to the aws_json_value at the given index in the array, otherwise NULL.
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_get_array_element(const struct aws_json_value *array, size_t index);
+
+/**
+ * Returns the number of items in the array aws_json_value.
+ * @param array The array aws_json_value.
+ * @return The number of items in the array_json_value.
+ */
+AWS_COMMON_API
+size_t aws_json_get_array_size(const struct aws_json_value *array);
+
+/**
+ * Removes the aws_json_value at the given index in the array aws_json_value.
+ * @param array The array aws_json_value.
+ * @param index The index containing the aws_json_value you want to remove.
+ * @return AWS_OP_SUCCESS if the aws_json_value at the index was removed.
+ * Will return AWS_OP_ERR if the array passed is invalid or if the index
+ * passed is out of range.
+ */
+AWS_COMMON_API
+int aws_json_value_remove_array_element(struct aws_json_value *array, size_t index);
+// ====================
+
+// ====================
+// Checks
+
+/**
+ * Checks if the aws_json_value is a string.
+ * @param value The aws_json_value to check.
+ * @return True if the aws_json_value is a string aws_json_value, otherwise false.
+ */
+AWS_COMMON_API
+bool aws_json_value_is_string(const struct aws_json_value *value);
+
+/**
+ * Checks if the aws_json_value is a number.
+ * @param value The aws_json_value to check.
+ * @return True if the aws_json_value is a number aws_json_value, otherwise false.
+ */
+AWS_COMMON_API
+bool aws_json_value_is_number(const struct aws_json_value *value);
+
+/**
+ * Checks if the aws_json_value is an array.
+ * @param value The aws_json_value to check.
+ * @return True if the aws_json_value is an array aws_json_value, otherwise false.
+ */
+AWS_COMMON_API
+bool aws_json_value_is_array(const struct aws_json_value *value);
+
+/**
+ * Checks if the aws_json_value is a boolean.
+ * @param value The aws_json_value to check.
+ * @return True if the aws_json_value is a boolean aws_json_value, otherwise false.
+ */
+AWS_COMMON_API
+bool aws_json_value_is_boolean(const struct aws_json_value *value);
+
+/**
+ * Checks if the aws_json_value is a null aws_json_value.
+ * @param value The aws_json_value to check.
+ * @return True if the aws_json_value is a null aws_json_value, otherwise false.
+ */
+AWS_COMMON_API
+bool aws_json_value_is_null(const struct aws_json_value *value);
+
+/**
+ * Checks if the aws_json_value is an object aws_json_value.
+ * @param value The aws_json_value to check.
+ * @return True if the aws_json_value is an object aws_json_value, otherwise false.
+ */
+AWS_COMMON_API
+bool aws_json_value_is_object(const struct aws_json_value *value);
+// ====================
+
+// ====================
+// Memory Management
+
+/**
+ * Initializes the JSON module for use.
+ * @param allocator The allocator to use for creating aws_json_value structs.
+ */
+AWS_COMMON_API
+void aws_json_module_init(struct aws_allocator *allocator);
+
+/**
+ * Cleans up the JSON module. Should be called when finished using the module.
+ */
+AWS_COMMON_API
+void aws_json_module_cleanup(void);
+
+/**
+ * Removes the aws_json_value from memory. If the aws_json_value is a object or array, it will also destroy
+ * attached aws_json_values as well.
+ *
+ * For example, if you called "aws_json_array_add(b, a)" to add an object "a" to an array "b", if you call
+ * "aws_json_destroy(b)" then it will also free "a" automatically. All children/attached aws_json_values are freed
+ * when the parent/root aws_json_value is destroyed.
+ * @param value The aws_json_value to destroy.
+ */
+AWS_COMMON_API
+void aws_json_value_destroy(struct aws_json_value *value);
+// ====================
+
+// ====================
+// Utility
+
+/**
+ * Appends an unformatted JSON string representation of the aws_json_value into the passed byte buffer.
+ * The byte buffer is expected to be already initialized so the function can append the JSON into it.
+ *
+ * Note: The byte buffer will automatically have its size extended if the JSON string is over the byte
+ * buffer capacity AND the byte buffer has an allocator associated with it. If the byte buffer does not
+ * have an allocator associated and the JSON string is over capacity, AWS_OP_ERR will be returned.
+ *
+ * Note: When you are finished with the aws_byte_buf, you must call "aws_byte_buf_clean_up_secure" to free
+ * the memory used, as it will NOT be called automatically.
+ * @param value The aws_json_value to format.
+ * @param output The destination for the JSON string
+ * @return AWS_OP_SUCCESS if the JSON string was allocated to output without any errors
+ * Will return AWS_OP_ERR if the value passed is not an aws_json_value or if there
+ * was an error appending the JSON into the byte buffer.
+ */
+AWS_COMMON_API
+int aws_byte_buf_append_json_string(const struct aws_json_value *value, struct aws_byte_buf *output);
+
+/**
+ * Appends a formatted JSON string representation of the aws_json_value into the passed byte buffer.
+ * The byte buffer is expected to already be initialized so the function can append the JSON into it.
+ *
+ * Note: The byte buffer will automatically have its size extended if the JSON string is over the byte
+ * buffer capacity AND the byte buffer has an allocator associated with it. If the byte buffer does not
+ * have an allocator associated and the JSON string is over capacity, AWS_OP_ERR will be returned.
+ *
+ * Note: When you are finished with the aws_byte_buf, you must call "aws_byte_buf_clean_up_secure" to free
+ * the memory used, as it will NOT be called automatically.
+ * @param value The aws_json_value to format.
+ * @param output The destination for the JSON string
+ * @return AWS_OP_SUCCESS if the JSON string was allocated to output without any errors
+ * Will return AWS_ERROR_INVALID_ARGUMENT if the value passed is not an aws_json_value or if there
+ * was an error appending the JSON into the byte buffer.
+ */
+AWS_COMMON_API
+int aws_byte_buf_append_json_string_formatted(const struct aws_json_value *value, struct aws_byte_buf *output);
+
+/**
+ * Parses the JSON string and returns an aws_json_value containing the root of the JSON.
+ * @param allocator The allocator used to create the value
+ * @param string The string containing the JSON.
+ * @return The root aws_json_value of the JSON.
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_from_string(struct aws_allocator *allocator, struct aws_byte_cursor string);
+// ====================
+
+#endif // AWS_COMMON_JSON_H
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h b/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h
index 5015d673e0..1b34e3bae3 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h
@@ -6,6 +6,7 @@
* SPDX-License-Identifier: Apache-2.0.
*/
+#include <aws/common/atomics.h>
#include <aws/common/common.h>
#include <aws/common/thread.h>
@@ -80,6 +81,9 @@ enum aws_common_log_subject {
AWS_LS_COMMON_THREAD,
AWS_LS_COMMON_MEMTRACE,
AWS_LS_COMMON_XML_PARSER,
+ AWS_LS_COMMON_IO,
+ AWS_LS_COMMON_BUS,
+ AWS_LS_COMMON_TEST,
AWS_LS_COMMON_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_COMMON_PACKAGE_ID)
};
@@ -109,6 +113,7 @@ struct aws_logger_vtable {
;
enum aws_log_level (*const get_log_level)(struct aws_logger *logger, aws_log_subject_t subject);
void (*const clean_up)(struct aws_logger *logger);
+ int (*set_log_level)(struct aws_logger *logger, enum aws_log_level);
};
struct aws_logger {
@@ -189,7 +194,7 @@ struct aws_logger_pipeline {
struct aws_log_channel *channel;
struct aws_log_writer *writer;
struct aws_allocator *allocator;
- enum aws_log_level level;
+ struct aws_atomic_var level;
};
/**
@@ -224,6 +229,15 @@ AWS_COMMON_API
void aws_logger_clean_up(struct aws_logger *logger);
/**
+ * Sets the current logging level for the logger. Loggers are not required to support this.
+ * @param logger logger to set the log level for
+ * @param level new log level for the logger
+ * @return AWS_OP_SUCCESS if the level was successfully set, AWS_OP_ERR otherwise
+ */
+AWS_COMMON_API
+int aws_logger_set_log_level(struct aws_logger *logger, enum aws_log_level level);
+
+/**
* Converts a log level to a c-string constant. Intended primarily to support building log lines that
* include the level in them, i.e.
*
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h b/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h
index 4bd7e028d1..48f90ad501 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h
@@ -75,6 +75,16 @@ AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1, 2, 3) == 3);
# endif /* defined(__GNUC__) || defined(__clang__) */
#endif /* defined(_MSC_VER) */
+#if defined(__has_feature)
+# if __has_feature(address_sanitizer)
+# define AWS_SUPPRESS_ASAN __attribute__((no_sanitize("address")))
+# endif
+#endif
+
+#if !defined(AWS_SUPPRESS_ASAN)
+# define AWS_SUPPRESS_ASAN
+#endif
+
/* If this is C++, restrict isn't supported. If this is not at least C99 on gcc and clang, it isn't supported.
* If visual C++ building in C mode, the restrict definition is __restrict.
* This just figures all of that out based on who's including this header file. */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h b/contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h
index 73c2ecfa55..edb91864a8 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h
@@ -57,6 +57,7 @@ int aws_mutex_lock(struct aws_mutex *mutex);
* Attempts to acquire the lock but returns immediately if it can not.
* While on some platforms such as Windows, this may behave as a reentrant mutex,
* you should not treat it like one. On platforms it is possible for it to be non-reentrant, it will be.
+ * Note: For windows, the minimum supported server version is Windows Server 2008 R2 [desktop apps | UWP apps]
*/
AWS_COMMON_API
int aws_mutex_try_lock(struct aws_mutex *mutex);
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/platform.h b/contrib/restricted/aws/aws-c-common/include/aws/common/platform.h
index c8be19c7d6..b1e16a1e7c 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/platform.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/platform.h
@@ -10,11 +10,20 @@
#ifdef _WIN32
# define AWS_OS_WINDOWS
+/* indicate whether this is for Windows desktop, or UWP or Windows S, or other Windows-like devices */
+# if defined(AWS_HAVE_WINAPI_DESKTOP)
+# define AWS_OS_WINDOWS_DESKTOP
+# endif
+
#elif __APPLE__
# define AWS_OS_APPLE
# include "TargetConditionals.h"
# if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
# define AWS_OS_IOS
+# elif defined(TARGET_OS_WATCH) && TARGET_OS_WATCH
+# define AWS_OS_WATCHOS
+# elif defined(TARGET_OS_TV) && TARGET_OS_TV
+# define AWS_OS_TVOS
# else
# define AWS_OS_MACOS
# endif
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h b/contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h
index 8859729346..a4df8c5061 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h
@@ -41,7 +41,7 @@ struct aws_priority_queue {
};
struct aws_priority_queue_node {
- /** The current index of the node in queuesion, or SIZE_MAX if the node has been removed. */
+ /** The current index of the node in question, or SIZE_MAX if the node has been removed. */
size_t current_index;
};
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/private/dlloads.h b/contrib/restricted/aws/aws-c-common/include/aws/common/private/dlloads.h
index 943f6cb98d..c9a90897c8 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/private/dlloads.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/private/dlloads.h
@@ -10,7 +10,13 @@
*/
#define AWS_MPOL_PREFERRED_ALIAS 1
+struct bitmask;
+
extern long (*g_set_mempolicy_ptr)(int, const unsigned long *, unsigned long);
+extern int (*g_numa_available_ptr)(void);
+extern int (*g_numa_num_configured_nodes_ptr)(void);
+extern int (*g_numa_num_possible_cpus_ptr)(void);
+extern int (*g_numa_node_of_cpu_ptr)(int cpu);
extern void *g_libnuma_handle;
#endif /* AWS_COMMON_PRIVATE_DLLOADS_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl
index b18a3cc97f..50b269fc7b 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl
@@ -1,5 +1,8 @@
#ifndef AWS_COMMON_PRIVATE_LOOKUP3_INL
#define AWS_COMMON_PRIVATE_LOOKUP3_INL
+
+#include <aws/common/macros.h>
+
/* clang-format off */
/*
@@ -498,6 +501,7 @@ static void hashlittle2(
size_t length, /* length of the key */
uint32_t *pc, /* IN: primary initval, OUT: primary hash */
uint32_t *pb) /* IN: secondary initval, OUT: secondary hash */
+ AWS_SUPPRESS_ASAN /* AddressSanitizer hates this implementation, even though it's innocuous */
{
uint32_t a,b,c; /* internal state */
union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/private/thread_shared.h b/contrib/restricted/aws/aws-c-common/include/aws/common/private/thread_shared.h
new file mode 100644
index 0000000000..ca263e56c9
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/private/thread_shared.h
@@ -0,0 +1,39 @@
+#ifndef AWS_COMMON_PRIVATE_THREAD_SHARED_H
+#define AWS_COMMON_PRIVATE_THREAD_SHARED_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/thread.h>
+
+struct aws_linked_list;
+struct aws_linked_list_node;
+
+/**
+ * Iterates a list of thread wrappers, joining against each corresponding thread, and freeing the wrapper once
+ * the join has completed. Do not hold the managed thread lock when invoking this function, instead swap the
+ * pending join list into a local and call this on the local.
+ *
+ * @param wrapper_list list of thread wrappers to join and free
+ */
+AWS_COMMON_API void aws_thread_join_and_free_wrapper_list(struct aws_linked_list *wrapper_list);
+
+/**
+ * Adds a thread (wrapper embedding a linked list node) to the global list of threads that have run to completion
+ * and need a join in order to know that the OS has truly finished with the thread.
+ * @param node linked list node embedded in the thread wrapper
+ */
+AWS_COMMON_API void aws_thread_pending_join_add(struct aws_linked_list_node *node);
+
+/**
+ * Initializes the managed thread system. Called during library init.
+ */
+AWS_COMMON_API void aws_thread_initialize_thread_management(void);
+
+/**
+ * Gets the current managed thread count
+ */
+AWS_COMMON_API size_t aws_thread_get_managed_thread_count(void);
+
+#endif /* AWS_COMMON_PRIVATE_THREAD_SHARED_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/promise.h b/contrib/restricted/aws/aws-c-common/include/aws/common/promise.h
new file mode 100644
index 0000000000..e19d858c72
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/promise.h
@@ -0,0 +1,95 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_COMMON_PROMISE_H
+#define AWS_COMMON_PROMISE_H
+
+#include <aws/common/common.h>
+
+/*
+ * Standard promise interface. Promise can be waited on by multiple threads, and as long as it is
+ * ref-counted correctly, will provide the resultant value/error code to all waiters.
+ * All promise API calls are internally thread-safe.
+ */
+struct aws_promise;
+/*
+ * Creates a new promise
+ */
+AWS_COMMON_API
+struct aws_promise *aws_promise_new(struct aws_allocator *allocator);
+
+/*
+ * Indicate a new reference to a promise. At minimum, each new thread making use of the promise should
+ * acquire it.
+ */
+AWS_COMMON_API
+struct aws_promise *aws_promise_acquire(struct aws_promise *promise);
+
+/*
+ * Releases a reference on the promise. When the refcount hits 0, the promise is cleaned up and freed.
+ */
+AWS_COMMON_API
+void aws_promise_release(struct aws_promise *promise);
+
+/*
+ * Waits infinitely for the promise to be completed
+ */
+AWS_COMMON_API
+void aws_promise_wait(struct aws_promise *promise);
+/*
+ * Waits for the requested time in nanoseconds. Returns true if the promise was completed.
+ */
+AWS_COMMON_API
+bool aws_promise_wait_for(struct aws_promise *promise, size_t nanoseconds);
+
+/*
+ * Completes the promise and stores the result along with an optional destructor. If the value
+ * is not taken via `aws_promise_take_value`, it will be destroyed when the promise's reference
+ * count reaches zero.
+ * NOTE: Promise cannot be completed twice
+ */
+AWS_COMMON_API
+void aws_promise_complete(struct aws_promise *promise, void *value, void (*dtor)(void *));
+
+/*
+ * Completes the promise and stores the error code
+ * NOTE: Promise cannot be completed twice
+ */
+AWS_COMMON_API
+void aws_promise_fail(struct aws_promise *promise, int error_code);
+
+/*
+ * Returns whether or not the promise has completed (regardless of success or failure)
+ */
+AWS_COMMON_API
+bool aws_promise_is_complete(struct aws_promise *promise);
+
+/*
+ * Returns the error code recorded if the promise failed, or 0 if it succeeded
+ * NOTE: It is fatal to attempt to retrieve the error code before the promise is completed
+ */
+AWS_COMMON_API
+int aws_promise_error_code(struct aws_promise *promise);
+
+/*
+ * Returns the value provided to the promise if it succeeded, or NULL if none was provided
+ * or the promise failed. Check `aws_promise_error_code` to be sure.
+ * NOTE: The ownership of the value is retained by the promise.
+ * NOTE: It is fatal to attempt to retrieve the value before the promise is completed
+ */
+AWS_COMMON_API
+void *aws_promise_value(struct aws_promise *promise);
+
+/*
+ * Returns the value provided to the promise if it succeeded, or NULL if none was provided
+ * or the promise failed. Check `aws_promise_error_code` to be sure.
+ * NOTE: The promise relinquishes ownership of the value, the caller is now responsible for
+ * freeing any resources associated with the value
+ * NOTE: It is fatal to attempt to take the value before the promise is completed
+ */
+AWS_COMMON_API
+void *aws_promise_take_value(struct aws_promise *promise);
+
+#endif // AWS_COMMON_PROMISE_H
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/ref_count.h b/contrib/restricted/aws/aws-c-common/include/aws/common/ref_count.h
index 71b33b892f..596ec2c497 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/ref_count.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/ref_count.h
@@ -54,46 +54,6 @@ AWS_COMMON_API void *aws_ref_count_acquire(struct aws_ref_count *ref_count);
*/
AWS_COMMON_API size_t aws_ref_count_release(struct aws_ref_count *ref_count);
-/**
- * Utility function that returns when all auxiliary threads created by crt types (event loop groups and
- * host resolvers) have completed and those types have completely cleaned themselves up. The actual cleanup
- * process may be invoked as a part of a spawned thread, but the wait will not get signalled until that cleanup
- * thread is in its at_exit callback processing loop with no outstanding memory allocations.
- *
- * Primarily used by tests to guarantee that everything is cleaned up before performing a memory check.
- */
-AWS_COMMON_API void aws_global_thread_creator_shutdown_wait(void);
-
-/**
- * Utility function that returns when all auxiliary threads created by crt types (event loop groups and
- * host resolvers) have completed and those types have completely cleaned themselves up. The actual cleanup
- * process may be invoked as a part of a spawned thread, but the wait will not get signalled until that cleanup
- * thread is in its at_exit callback processing loop with no outstanding memory allocations.
- *
- * Primarily used by tests to guarantee that everything is cleaned up before performing a memory check.
- *
- * Returns AWS_OP_SUCCESS if the conditional wait terminated properly, AWS_OP_ERR otherwise (timeout, etc..)
- */
-AWS_COMMON_API int aws_global_thread_creator_shutdown_wait_for(uint32_t wait_timeout_in_seconds);
-
-/**
- * Increments the global thread creator count. Currently invoked on event loop group and host resolver creation.
- *
- * Tracks the number of outstanding thread-creating objects (not the total number of threads generated).
- * Currently this is the number of aws_host_resolver and aws_event_loop_group objects that have not yet been
- * fully cleaned up.
- */
-AWS_COMMON_API void aws_global_thread_creator_increment(void);
-
-/**
- * Decrements the global thread creator count. Currently invoked on event loop group and host resolver destruction.
- *
- * Tracks the number of outstanding thread-creating objects (not the total number of threads generated).
- * Currently this is the number of aws_host_resolver and aws_event_loop_group objects that have not yet been
- * fully cleaned up.
- */
-AWS_COMMON_API void aws_global_thread_creator_decrement(void);
-
AWS_EXTERN_C_END
#endif /* AWS_COMMON_REF_COUNT_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/resource_name.h b/contrib/restricted/aws/aws-c-common/include/aws/common/resource_name.h
deleted file mode 100644
index 9d636e23ff..0000000000
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/resource_name.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-#ifndef AWS_COMMON_RESOURCE_NAME_H
-#define AWS_COMMON_RESOURCE_NAME_H
-#pragma once
-
-#include <aws/common/byte_buf.h>
-#include <aws/common/common.h>
-
-struct aws_resource_name {
- struct aws_byte_cursor partition;
- struct aws_byte_cursor service;
- struct aws_byte_cursor region;
- struct aws_byte_cursor account_id;
- struct aws_byte_cursor resource_id;
-};
-
-AWS_EXTERN_C_BEGIN
-
-/**
- Given an ARN "Amazon Resource Name" represented as an in memory a
- structure representing the parts
-*/
-AWS_COMMON_API
-int aws_resource_name_init_from_cur(struct aws_resource_name *arn, const struct aws_byte_cursor *input);
-
-/**
- Calculates the space needed to write an ARN to a byte buf
-*/
-AWS_COMMON_API
-int aws_resource_name_length(const struct aws_resource_name *arn, size_t *size);
-
-/**
- Serializes an ARN structure into the lexical string format
-*/
-AWS_COMMON_API
-int aws_byte_buf_append_resource_name(struct aws_byte_buf *buf, const struct aws_resource_name *arn);
-
-AWS_EXTERN_C_END
-
-#endif /* AWS_COMMON_RESOURCE_NAME_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.h b/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.h
index d3e7b6da5a..9f9a1499e3 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.h
@@ -92,12 +92,6 @@ AWS_COMMON_API bool aws_ring_buffer_buf_belongs_to_pool(
const struct aws_byte_buf *buf);
/**
- * Initializes the supplied allocator to be based on the provided ring buffer. Allocations must be allocated
- * and freed in the same order, or the ring buffer will assert.
- */
-AWS_COMMON_API int aws_ring_buffer_allocator_init(struct aws_allocator *allocator, struct aws_ring_buffer *ring_buffer);
-
-/**
* Cleans up a ring buffer allocator instance. Does not clean up the ring buffer.
*/
AWS_COMMON_API void aws_ring_buffer_allocator_clean_up(struct aws_allocator *allocator);
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.inl
index 7ce79a68d9..34e76a4a13 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.inl
@@ -14,7 +14,7 @@ AWS_EXTERN_C_BEGIN
AWS_STATIC_IMPL bool aws_ring_buffer_check_atomic_ptr(
const struct aws_ring_buffer *ring_buf,
const uint8_t *atomic_ptr) {
- return (atomic_ptr >= ring_buf->allocation && atomic_ptr <= ring_buf->allocation_end);
+ return ((atomic_ptr != NULL) && (atomic_ptr >= ring_buf->allocation && atomic_ptr <= ring_buf->allocation_end));
}
/**
@@ -37,8 +37,8 @@ AWS_STATIC_IMPL bool aws_ring_buffer_is_valid(const struct aws_ring_buffer *ring
bool tail_in_range = aws_ring_buffer_check_atomic_ptr(ring_buf, tail);
/* if head points-to the first element of the buffer then tail must too */
bool valid_head_tail = (head != ring_buf->allocation) || (tail == ring_buf->allocation);
- return ring_buf && AWS_MEM_IS_READABLE(ring_buf->allocation, ring_buf->allocation_end - ring_buf->allocation) &&
- head_in_range && tail_in_range && valid_head_tail && (ring_buf->allocator != NULL);
+ return ring_buf && (ring_buf->allocation != NULL) && head_in_range && tail_in_range && valid_head_tail &&
+ (ring_buf->allocator != NULL);
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_RING_BUFFER_INL */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h b/contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h
index 64863d2c28..01c257dfba 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h
@@ -54,6 +54,7 @@ AWS_COMMON_API int aws_rw_lock_wlock(struct aws_rw_lock *lock);
* Attempts to acquire the lock but returns immediately if it can not.
* While on some platforms such as Windows, this may behave as a reentrant mutex,
* you should not treat it like one. On platforms it is possible for it to be non-reentrant, it will be.
+ * Note: For windows, the minimum supported server version is Windows Server 2008 R2 [desktop apps | UWP apps]
*/
AWS_COMMON_API int aws_rw_lock_try_rlock(struct aws_rw_lock *lock);
AWS_COMMON_API int aws_rw_lock_try_wlock(struct aws_rw_lock *lock);
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/stdint.h b/contrib/restricted/aws/aws-c-common/include/aws/common/stdint.h
index 8249684710..6775960f0b 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/stdint.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/stdint.h
@@ -85,16 +85,9 @@ AWS_STATIC_ASSERT(sizeof(intptr_t) == sizeof(void *));
AWS_STATIC_ASSERT(sizeof(char) == 1);
#endif /* NO_STDINT */
-#if defined(_MSC_VER)
+/**
+ * @deprecated Use int64_t instead for offsets in public APIs.
+ */
typedef int64_t aws_off_t;
-#else
-# if _FILE_OFFSET_BITS == 64 || _POSIX_C_SOURCE >= 200112L
-typedef off_t aws_off_t;
-# else
-typedef long aws_off_t;
-# endif /* _FILE_OFFSET_BITS == 64 || _POSIX_C_SOURCE >= 200112L */
-#endif /* defined(_MSC_VER) */
-
-AWS_STATIC_ASSERT(sizeof(int64_t) >= sizeof(aws_off_t));
#endif /* AWS_COMMON_STDINT_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/string.h b/contrib/restricted/aws/aws-c-common/include/aws/common/string.h
index 58eba5baf7..c73a24ad4a 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/string.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/string.h
@@ -40,16 +40,133 @@
#endif
struct aws_string {
struct aws_allocator *const allocator;
+ /* size in bytes of `bytes` minus any null terminator.
+ * NOTE: This is not the number of characters in the string. */
const size_t len;
/* give this a storage specifier for C++ purposes. It will likely be larger after init. */
const uint8_t bytes[1];
};
+
+#ifdef AWS_OS_WINDOWS
+struct aws_wstring {
+ struct aws_allocator *const allocator;
+ /* number of characters in the string not including the null terminator. */
+ const size_t len;
+ /* give this a storage specifier for C++ purposes. It will likely be larger after init. */
+ const wchar_t bytes[1];
+};
+#endif /* AWS_OS_WINDOWS */
+
#ifdef _MSC_VER
# pragma warning(pop)
#endif
AWS_EXTERN_C_BEGIN
+#ifdef AWS_OS_WINDOWS
+/**
+ * For windows only. Converts `to_convert` to a windows wchar format (UTF-16) for use with windows OS interop.
+ *
+ * Note: `to_convert` is assumed to be UTF-8 or ASCII.
+ *
+ * returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_wstring *aws_string_convert_to_wstring(
+ struct aws_allocator *allocator,
+ const struct aws_string *to_convert);
+
+/**
+ * For windows only. Converts `to_convert` to a windows wchar format (UTF-16) for use with windows OS interop.
+ *
+ * Note: `to_convert` is assumed to be UTF-8 or ASCII.
+ *
+ * returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_wstring *aws_string_convert_to_wchar_from_byte_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *to_convert);
+
+/**
+ * clean up str.
+ */
+AWS_COMMON_API
+void aws_wstring_destroy(struct aws_wstring *str);
+
+/**
+ * For windows only. Converts `to_convert` from a windows wchar format (UTF-16) to UTF-8.
+ *
+ * Note: `to_convert` is assumed to be wchar already.
+ *
+ * returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_string *aws_string_convert_from_wchar_str(
+ struct aws_allocator *allocator,
+ const struct aws_wstring *to_convert);
+
+/**
+ * For windows only. Converts `to_convert` from a windows wchar format (UTF-16) to UTF-8.
+ *
+ * Note: `to_convert` is assumed to be wchar already.
+ *
+ * returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_string *aws_string_convert_from_wchar_byte_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *to_convert);
+
+/**
+ * For windows only. Converts `to_convert` from a windows wchar format (UTF-16) to UTF-8.
+ *
+ * Note: `to_convert` is assumed to be wchar already.
+ *
+ * returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_string *aws_string_convert_from_wchar_c_str(
+ struct aws_allocator *allocator,
+ const wchar_t *to_convert);
+
+/**
+ * Create a new wide string from a byte cursor. This assumes that w_str_cur is already in utf-16.
+ *
+ * returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_wstring *aws_wstring_new_from_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *w_str_cur);
+
+/**
+ * Create a new wide string from a utf-16 string enclosing array. The length field is in number of characters not
+ * counting the null terminator.
+ *
+ * returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_wstring *aws_wstring_new_from_array(
+ struct aws_allocator *allocator,
+ const wchar_t *w_str,
+ size_t length);
+
+/**
+ * Returns a wchar_t * pointer for use with windows OS interop.
+ */
+AWS_COMMON_API const wchar_t *aws_wstring_c_str(const struct aws_wstring *str);
+
+/**
+ * Returns the number of characters in the wchar string. NOTE: This is not the length in bytes or the buffer size.
+ */
+AWS_COMMON_API size_t aws_wstring_num_chars(const struct aws_wstring *str);
+
+/**
+ * Returns the length in bytes for the buffer.
+ */
+AWS_COMMON_API size_t aws_wstring_size_bytes(const struct aws_wstring *str);
+
+/**
+ * Verifies that str is a valid string. Returns true if it's valid and false otherwise.
+ */
+AWS_COMMON_API bool aws_wstring_is_valid(const struct aws_wstring *str);
+
+#endif /* AWS_OS_WINDOWS */
+
/**
* Returns true if bytes of string are the same, false otherwise.
*/
@@ -212,7 +329,7 @@ struct aws_byte_cursor aws_byte_cursor_from_string(const struct aws_string *src)
AWS_COMMON_API
struct aws_string *aws_string_clone_or_reuse(struct aws_allocator *allocator, const struct aws_string *str);
-/* Computes the length of a c string in bytes assuming the character set is either ASCII or UTF-8. If no NULL character
+/** Computes the length of a c string in bytes assuming the character set is either ASCII or UTF-8. If no NULL character
* is found within max_read_len of str, AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED is raised. Otherwise, str_len
* will contain the string length minus the NULL character, and AWS_OP_SUCCESS will be returned. */
AWS_COMMON_API
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h b/contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h
index 4143fed56b..5b6600e939 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h
@@ -14,6 +14,11 @@ enum aws_platform_os {
AWS_PLATFORM_OS_UNIX,
};
+struct aws_cpu_info {
+ int32_t cpu_id;
+ bool suspected_hyper_thread;
+};
+
AWS_EXTERN_C_BEGIN
/* Returns the OS this was built under */
@@ -24,6 +29,25 @@ enum aws_platform_os aws_get_platform_build_os(void);
AWS_COMMON_API
size_t aws_system_info_processor_count(void);
+/**
+ * Returns the logical processor groupings on the system (such as multiple numa nodes).
+ */
+AWS_COMMON_API
+uint16_t aws_get_cpu_group_count(void);
+
+/**
+ * For a group, returns the number of CPUs it contains.
+ */
+AWS_COMMON_API
+size_t aws_get_cpu_count_for_group(uint16_t group_idx);
+
+/**
+ * Fills in cpu_ids_array with the cpu_id's for the group. To obtain the size to allocate for cpu_ids_array
+ * and the value for argument for cpu_ids_array_length, call aws_get_cpu_count_for_group().
+ */
+AWS_COMMON_API
+void aws_get_cpu_ids_for_group(uint16_t group_idx, struct aws_cpu_info *cpu_ids_array, size_t cpu_ids_array_length);
+
/* Returns true if a debugger is currently attached to the process. */
AWS_COMMON_API
bool aws_is_debugger_present(void);
@@ -74,7 +98,7 @@ void aws_backtrace_print(FILE *fp, void *call_site_data);
/* Log the callstack from the current stack to the currently configured aws_logger */
AWS_COMMON_API
-void aws_backtrace_log(void);
+void aws_backtrace_log(int log_level);
AWS_EXTERN_C_END
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/task_scheduler.h b/contrib/restricted/aws/aws-c-common/include/aws/common/task_scheduler.h
index 1c78fd3e51..24a5cc60d4 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/task_scheduler.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/task_scheduler.h
@@ -33,7 +33,12 @@ struct aws_task {
struct aws_linked_list_node node;
struct aws_priority_queue_node priority_queue_node;
const char *type_tag;
- size_t reserved;
+
+ /* honor the ABI compat */
+ union {
+ bool scheduled;
+ size_t reserved;
+ } abi_extension;
};
struct aws_task_scheduler {
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/thread.h b/contrib/restricted/aws/aws-c-common/include/aws/common/thread.h
index e7abd79f7e..49e5241748 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/thread.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/thread.h
@@ -15,6 +15,41 @@ enum aws_thread_detach_state {
AWS_THREAD_NOT_CREATED = 1,
AWS_THREAD_JOINABLE,
AWS_THREAD_JOIN_COMPLETED,
+ AWS_THREAD_MANAGED,
+};
+
+/**
+ * Specifies the join strategy used on an aws_thread, which in turn controls whether or not a thread participates
+ * in the managed thread system. The managed thread system provides logic to guarantee a join on all participating
+ * threads at the cost of laziness (the user cannot control when joins happen).
+ *
+ * Manual - thread does not participate in the managed thread system; any joins must be done by the user. This
+ * is the default. The user must call aws_thread_clean_up(), but only after any desired join operation has completed.
+ * Not doing so will cause the windows handle to leak.
+ *
+ * Managed - the managed thread system will automatically perform a join some time after the thread's run function
+ * has completed. It is an error to call aws_thread_join on a thread configured with the managed join strategy. The
+ * managed thread system will call aws_thread_clean_up() on the thread after the background join has completed.
+ *
+ * Additionally, an API exists, aws_thread_join_all_managed(), which blocks and returns when all outstanding threads
+ * with the managed strategy have fully joined. This API is useful for tests (rather than waiting for many individual
+ * signals) and program shutdown or DLL unload. This API is automatically invoked by the common library clean up
+ * function. If the common library clean up is called from a managed thread, this will cause deadlock.
+ *
+ * Lazy thread joining is done only when threads finish their run function or when the user calls
+ * aws_thread_join_all_managed(). This means it may be a long time between thread function completion and the join
+ * being applied, but the queue of unjoined threads is always one or fewer so there is no critical resource
+ * backlog.
+ *
+ * Currently, only event loop group async cleanup and host resolver threads participate in the managed thread system.
+ * Additionally, event loop threads will increment and decrement the pending join count (they are manually joined
+ * internally) in order to have an accurate view of internal thread usage and also to prevent failure to release
+ * an event loop group fully from allowing aws_thread_join_all_managed() from running to completion when its
+ * intent is such that it should block instead.
+ */
+enum aws_thread_join_strategy {
+ AWS_TJS_MANUAL = 0,
+ AWS_TJS_MANAGED,
};
struct aws_thread_options {
@@ -30,6 +65,8 @@ struct aws_thread_options {
* On Apple and Android platforms, this setting doesn't do anything at all.
*/
int32_t cpu_id;
+
+ enum aws_thread_join_strategy join_strategy;
};
#ifdef _WIN32
@@ -81,7 +118,11 @@ int aws_thread_init(struct aws_thread *thread, struct aws_allocator *allocator);
/**
* Creates an OS level thread and associates it with func. context will be passed to func when it is executed.
* options will be applied to the thread if they are applicable for the platform.
- * You must either call join or detach after creating the thread and before calling clean_up.
+ *
+ * After launch, you may join on the thread. A successfully launched thread must have clean_up called on it in order
+ * to avoid a handle leak. If you do not join before calling clean_up, the thread will become detached.
+ *
+ * Managed threads must not have join or clean_up called on them by external code.
*/
AWS_COMMON_API
int aws_thread_launch(
@@ -105,13 +146,31 @@ enum aws_thread_detach_state aws_thread_get_detach_state(struct aws_thread *thre
/**
* Joins the calling thread to a thread instance. Returns when thread is
- * finished.
+ * finished. Calling this from the associated OS thread will cause a deadlock.
*/
AWS_COMMON_API
int aws_thread_join(struct aws_thread *thread);
/**
- * Cleans up the thread handle. Either detach or join must be called
+ * Blocking call that waits for all managed threads to complete their join call. This can only be called
+ * from the main thread or a non-managed thread.
+ *
+ * This gets called automatically from library cleanup.
+ *
+ * By default the wait is unbounded, but that default can be overridden via aws_thread_set_managed_join_timeout_ns()
+ */
+AWS_COMMON_API
+int aws_thread_join_all_managed(void);
+
+/**
+ * Overrides how long, in nanoseconds, that aws_thread_join_all_managed will wait for threads to complete.
+ * A value of zero will result in an unbounded wait.
+ */
+AWS_COMMON_API
+void aws_thread_set_managed_join_timeout_ns(uint64_t timeout_in_ns);
+
+/**
+ * Cleans up the thread handle. Don't call this on a managed thread. If you wish to join the thread, you must join
* before calling this function.
*/
AWS_COMMON_API
@@ -146,6 +205,24 @@ typedef void(aws_thread_atexit_fn)(void *user_data);
AWS_COMMON_API
int aws_thread_current_at_exit(aws_thread_atexit_fn *callback, void *user_data);
+/**
+ * Increments the count of unjoined threads in the managed thread system. Used by managed threads and
+ * event loop threads. Additional usage requires the user to join corresponding threads themselves and
+ * correctly increment/decrement even in the face of launch/join errors.
+ *
+ * aws_thread_join_all_managed() will not return until this count has gone to zero.
+ */
+AWS_COMMON_API void aws_thread_increment_unjoined_count(void);
+
+/**
+ * Decrements the count of unjoined threads in the managed thread system. Used by managed threads and
+ * event loop threads. Additional usage requires the user to join corresponding threads themselves and
+ * correctly increment/decrement even in the face of launch/join errors.
+ *
+ * aws_thread_join_all_managed() will not return until this count has gone to zero.
+ */
+AWS_COMMON_API void aws_thread_decrement_unjoined_count(void);
+
AWS_EXTERN_C_END
#endif /* AWS_COMMON_THREAD_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/thread_scheduler.h b/contrib/restricted/aws/aws-c-common/include/aws/common/thread_scheduler.h
new file mode 100644
index 0000000000..5457aa2d7b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/thread_scheduler.h
@@ -0,0 +1,60 @@
+#ifndef AWS_COMMON_THREAD_SCHEDULER_H
+#define AWS_COMMON_THREAD_SCHEDULER_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/common.h>
+
+struct aws_thread_scheduler;
+struct aws_thread_options;
+struct aws_task;
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Creates a new instance of a thread scheduler. This object receives scheduled tasks and executes them inside a
+ * background thread. On success, this function returns an instance with a ref-count of 1. On failure it returns NULL.
+ *
+ * thread_options are optional.
+ *
+ * The semantics of this interface conform to the semantics of aws_task_scheduler.
+ */
+AWS_COMMON_API
+struct aws_thread_scheduler *aws_thread_scheduler_new(
+ struct aws_allocator *allocator,
+ const struct aws_thread_options *thread_options);
+
+/**
+ * Acquire a reference to the scheduler.
+ */
+AWS_COMMON_API void aws_thread_scheduler_acquire(struct aws_thread_scheduler *scheduler);
+
+/**
+ * Release a reference to the scheduler.
+ */
+AWS_COMMON_API void aws_thread_scheduler_release(const struct aws_thread_scheduler *scheduler);
+
+/**
+ * Schedules a task to run in the future. time_to_run is the absolute time from the system hw_clock.
+ */
+AWS_COMMON_API void aws_thread_scheduler_schedule_future(
+ struct aws_thread_scheduler *scheduler,
+ struct aws_task *task,
+ uint64_t time_to_run);
+
+/**
+ * Schedules a task to run as soon as possible.
+ */
+AWS_COMMON_API void aws_thread_scheduler_schedule_now(struct aws_thread_scheduler *scheduler, struct aws_task *task);
+
+/**
+ * Cancel a task that has been scheduled. The cancellation callback will be invoked in the background thread.
+ * This function is slow, so please don't do it in the hot path for your code.
+ */
+AWS_COMMON_API void aws_thread_scheduler_cancel_task(struct aws_thread_scheduler *scheduler, struct aws_task *task);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_COMMON_THREAD_SCHEDULER_H */
diff --git a/contrib/restricted/aws/aws-c-common/source/allocator.c b/contrib/restricted/aws/aws-c-common/source/allocator.c
index 6ffb531509..a672662470 100644
--- a/contrib/restricted/aws/aws-c-common/source/allocator.c
+++ b/contrib/restricted/aws/aws-c-common/source/allocator.c
@@ -25,6 +25,10 @@
# pragma warning(disable : 4100)
#endif
+#ifndef PAGE_SIZE
+# define PAGE_SIZE (4 * 1024)
+#endif
+
bool aws_allocator_is_valid(const struct aws_allocator *alloc) {
/* An allocator must define mem_acquire and mem_release. All other fields are optional */
return alloc && AWS_OBJECT_PTR_IS_READABLE(alloc) && alloc->mem_acquire && alloc->mem_release;
@@ -32,23 +36,74 @@ bool aws_allocator_is_valid(const struct aws_allocator *alloc) {
static void *s_default_malloc(struct aws_allocator *allocator, size_t size) {
(void)allocator;
- return malloc(size);
+ /* larger allocations should be aligned so that AVX and friends can avoid
+ * the extra preamble during unaligned versions of memcpy/memset on big buffers
+ * This will also accelerate hardware CRC and SHA on ARM chips
+ *
+ * 64 byte alignment for > page allocations on 64 bit systems
+ * 32 byte alignment for > page allocations on 32 bit systems
+ * 16 byte alignment for <= page allocations on 64 bit systems
+ * 8 byte alignment for <= page allocations on 32 bit systems
+ *
+ * We use PAGE_SIZE as the boundary because we are not aware of any allocations of
+ * this size or greater that are not data buffers
+ */
+ const size_t alignment = sizeof(void *) * (size > PAGE_SIZE ? 8 : 2);
+#if !defined(_WIN32)
+ void *result = NULL;
+ int err = posix_memalign(&result, alignment, size);
+ (void)err;
+ AWS_PANIC_OOM(result, "posix_memalign failed to allocate memory");
+ return result;
+#else
+ void *mem = _aligned_malloc(size, alignment);
+ AWS_FATAL_POSTCONDITION(mem && "_aligned_malloc failed to allocate memory");
+ return mem;
+#endif
}
static void s_default_free(struct aws_allocator *allocator, void *ptr) {
(void)allocator;
+#if !defined(_WIN32)
free(ptr);
+#else
+ _aligned_free(ptr);
+#endif
}
static void *s_default_realloc(struct aws_allocator *allocator, void *ptr, size_t oldsize, size_t newsize) {
(void)allocator;
(void)oldsize;
- return realloc(ptr, newsize);
+ AWS_FATAL_PRECONDITION(newsize);
+
+#if !defined(_WIN32)
+ if (newsize <= oldsize) {
+ return ptr;
+ }
+
+ /* newsize is > oldsize, need more memory */
+ void *new_mem = s_default_malloc(allocator, newsize);
+ AWS_PANIC_OOM(new_mem, "Unhandled OOM encountered in s_default_malloc");
+
+ if (ptr) {
+ memcpy(new_mem, ptr, oldsize);
+ s_default_free(allocator, ptr);
+ }
+
+ return new_mem;
+#else
+ const size_t alignment = sizeof(void *) * (newsize > PAGE_SIZE ? 8 : 2);
+ void *new_mem = _aligned_realloc(ptr, newsize, alignment);
+ AWS_PANIC_OOM(new_mem, "Unhandled OOM encountered in _aligned_realloc");
+ return new_mem;
+#endif
}
static void *s_default_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
- (void)allocator;
- return calloc(num, size);
+ void *mem = s_default_malloc(allocator, num * size);
+ AWS_PANIC_OOM(mem, "Unhandled OOM encountered in s_default_malloc");
+ memset(mem, 0, num * size);
+ return mem;
}
static struct aws_allocator default_allocator = {
@@ -69,9 +124,8 @@ void *aws_mem_acquire(struct aws_allocator *allocator, size_t size) {
AWS_FATAL_PRECONDITION(size != 0);
void *mem = allocator->mem_acquire(allocator, size);
- if (!mem) {
- aws_raise_error(AWS_ERROR_OOM);
- }
+ AWS_PANIC_OOM(mem, "Unhandled OOM encountered in aws_mem_acquire with allocator");
+
return mem;
}
@@ -84,28 +138,21 @@ void *aws_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
/* Defensive check: never use calloc with size * num that would overflow
* https://wiki.sei.cmu.edu/confluence/display/c/MEM07-C.+Ensure+that+the+arguments+to+calloc%28%29%2C+when+multiplied%2C+do+not+wrap
*/
- size_t required_bytes;
- if (aws_mul_size_checked(num, size, &required_bytes)) {
- return NULL;
- }
+ size_t required_bytes = 0;
+ AWS_FATAL_POSTCONDITION(!aws_mul_size_checked(num, size, &required_bytes), "calloc computed size > SIZE_MAX");
/* If there is a defined calloc, use it */
if (allocator->mem_calloc) {
void *mem = allocator->mem_calloc(allocator, num, size);
- if (!mem) {
- aws_raise_error(AWS_ERROR_OOM);
- }
+ AWS_PANIC_OOM(mem, "Unhandled OOM encountered in aws_mem_acquire with allocator");
return mem;
}
/* Otherwise, emulate calloc */
void *mem = allocator->mem_acquire(allocator, required_bytes);
- if (!mem) {
- aws_raise_error(AWS_ERROR_OOM);
- return NULL;
- }
+ AWS_PANIC_OOM(mem, "Unhandled OOM encountered in aws_mem_acquire with allocator");
+
memset(mem, 0, required_bytes);
- AWS_POSTCONDITION(mem != NULL);
return mem;
}
@@ -136,10 +183,7 @@ void *aws_mem_acquire_many(struct aws_allocator *allocator, size_t count, ...) {
if (total_size > 0) {
allocation = aws_mem_acquire(allocator, total_size);
- if (!allocation) {
- aws_raise_error(AWS_ERROR_OOM);
- goto cleanup;
- }
+ AWS_PANIC_OOM(allocation, "Unhandled OOM encountered in aws_mem_acquire with allocator");
uint8_t *current_ptr = allocation;
@@ -155,7 +199,6 @@ void *aws_mem_acquire_many(struct aws_allocator *allocator, size_t count, ...) {
}
}
-cleanup:
va_end(args_allocs);
return allocation;
}
@@ -185,9 +228,8 @@ int aws_mem_realloc(struct aws_allocator *allocator, void **ptr, size_t oldsize,
if (allocator->mem_realloc) {
void *newptr = allocator->mem_realloc(allocator, *ptr, oldsize, newsize);
- if (!newptr) {
- return aws_raise_error(AWS_ERROR_OOM);
- }
+ AWS_PANIC_OOM(newptr, "Unhandled OOM encountered in aws_mem_acquire with allocator");
+
*ptr = newptr;
return AWS_OP_SUCCESS;
}
@@ -198,9 +240,7 @@ int aws_mem_realloc(struct aws_allocator *allocator, void **ptr, size_t oldsize,
}
void *newptr = allocator->mem_acquire(allocator, newsize);
- if (!newptr) {
- return aws_raise_error(AWS_ERROR_OOM);
- }
+ AWS_PANIC_OOM(newptr, "Unhandled OOM encountered in aws_mem_acquire with allocator");
memcpy(newptr, *ptr, oldsize);
memset((uint8_t *)newptr + oldsize, 0, newsize - oldsize);
@@ -225,10 +265,6 @@ static void *s_cf_allocator_allocate(CFIndex alloc_size, CFOptionFlags hint, voi
void *mem = aws_mem_acquire(allocator, (size_t)alloc_size + sizeof(size_t));
- if (!mem) {
- return NULL;
- }
-
size_t allocation_size = (size_t)alloc_size + sizeof(size_t);
memcpy(mem, &allocation_size, sizeof(size_t));
return (void *)((uint8_t *)mem + sizeof(size_t));
@@ -252,9 +288,7 @@ static void *s_cf_allocator_reallocate(void *ptr, CFIndex new_size, CFOptionFlag
size_t original_size = 0;
memcpy(&original_size, original_allocation, sizeof(size_t));
- if (aws_mem_realloc(allocator, &original_allocation, original_size, (size_t)new_size)) {
- return NULL;
- }
+ aws_mem_realloc(allocator, &original_allocation, original_size, (size_t)new_size);
size_t new_allocation_size = (size_t)new_size;
memcpy(original_allocation, &new_allocation_size, sizeof(size_t));
@@ -298,9 +332,7 @@ CFAllocatorRef aws_wrapped_cf_allocator_new(struct aws_allocator *allocator) {
cf_allocator = CFAllocatorCreate(NULL, &context);
- if (!cf_allocator) {
- aws_raise_error(AWS_ERROR_OOM);
- }
+ AWS_FATAL_ASSERT(cf_allocator && "creation of cf allocator failed!");
return cf_allocator;
}
diff --git a/contrib/restricted/aws/aws-c-common/source/allocator_sba.c b/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
index d30c67c37e..47f080acad 100644
--- a/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
+++ b/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
@@ -186,11 +186,15 @@ static void s_sba_clean_up(struct small_block_allocator *sba) {
for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) {
void *page_addr = NULL;
aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx);
- s_aligned_free(page_addr);
+ struct page_header *page = page_addr;
+ AWS_ASSERT(page->alloc_count == 0 && "Memory still allocated in aws_sba_allocator (bin)");
+ s_aligned_free(page);
}
if (bin->page_cursor) {
void *page_addr = s_page_base(bin->page_cursor);
- s_aligned_free(page_addr);
+ struct page_header *page = page_addr;
+ AWS_ASSERT(page->alloc_count == 0 && "Memory still allocated in aws_sba_allocator (page)");
+ s_aligned_free(page);
}
aws_array_list_clean_up(&bin->active_pages);
@@ -238,6 +242,53 @@ void aws_small_block_allocator_destroy(struct aws_allocator *sba_allocator) {
aws_mem_release(allocator, sba);
}
+size_t aws_small_block_allocator_bytes_active(struct aws_allocator *sba_allocator) {
+ AWS_FATAL_ASSERT(sba_allocator && "aws_small_block_allocator_bytes_used requires a non-null allocator");
+ struct small_block_allocator *sba = sba_allocator->impl;
+ AWS_FATAL_ASSERT(sba && "aws_small_block_allocator_bytes_used: supplied allocator has invalid SBA impl");
+
+ size_t used = 0;
+ for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
+ struct sba_bin *bin = &sba->bins[idx];
+ sba->lock(&bin->mutex);
+ for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) {
+ void *page_addr = NULL;
+ aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx);
+ struct page_header *page = page_addr;
+ used += page->alloc_count * bin->size;
+ }
+ if (bin->page_cursor) {
+ void *page_addr = s_page_base(bin->page_cursor);
+ struct page_header *page = page_addr;
+ used += page->alloc_count * bin->size;
+ }
+ sba->unlock(&bin->mutex);
+ }
+
+ return used;
+}
+
+size_t aws_small_block_allocator_bytes_reserved(struct aws_allocator *sba_allocator) {
+ AWS_FATAL_ASSERT(sba_allocator && "aws_small_block_allocator_bytes_used requires a non-null allocator");
+ struct small_block_allocator *sba = sba_allocator->impl;
+ AWS_FATAL_ASSERT(sba && "aws_small_block_allocator_bytes_used: supplied allocator has invalid SBA impl");
+
+ size_t used = 0;
+ for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
+ struct sba_bin *bin = &sba->bins[idx];
+ sba->lock(&bin->mutex);
+ used += (bin->active_pages.length + (bin->page_cursor != NULL)) * AWS_SBA_PAGE_SIZE;
+ sba->unlock(&bin->mutex);
+ }
+
+ return used;
+}
+
+size_t aws_small_block_allocator_page_size(struct aws_allocator *sba_allocator) {
+ (void)sba_allocator;
+ return AWS_SBA_PAGE_SIZE;
+}
+
/* NOTE: Expects the mutex to be held by the caller */
static void *s_sba_alloc_from_bin(struct sba_bin *bin) {
/* check the free list, hand chunks out in FIFO order */
diff --git a/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c b/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c
index 6385c146fb..ffc6e0d4c9 100644
--- a/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c
+++ b/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c
@@ -51,14 +51,36 @@ static bool s_has_sse42(void) {
static bool s_has_avx2(void) {
uint32_t abcd[4];
- uint32_t avx2_bmi12_mask = (1 << 5) | (1 << 3) | (1 << 8);
- /* CPUID.(EAX=01H, ECX=0H):ECX.FMA[bit 12]==1 &&
- CPUID.(EAX=01H, ECX=0H):ECX.MOVBE[bit 22]==1 &&
- CPUID.(EAX=01H, ECX=0H):ECX.OSXSAVE[bit 27]==1 */
+
+ /* Check AVX2:
+ * CPUID.(EAX=07H, ECX=0H):EBX.AVX2[bit 5]==1 */
+ uint32_t avx2_mask = (1 << 5);
aws_run_cpuid(7, 0, abcd);
+ if ((abcd[1] & avx2_mask) != avx2_mask) {
+ return false;
+ }
- if ((abcd[1] & avx2_bmi12_mask) != avx2_bmi12_mask)
+ /* Also check AVX:
+ * CPUID.(EAX=01H, ECX=0H):ECX.AVX[bit 28]==1
+ *
+ * NOTE: It SHOULD be impossible for a CPU to support AVX2 without supporting AVX.
+ * But we've received crash reports where the AVX2 feature check passed
+ * and then an AVX instruction caused an "invalid instruction" crash.
+ *
+ * We diagnosed these machines by asking users to run the sample program from:
+ * https://docs.microsoft.com/en-us/cpp/intrinsics/cpuid-cpuidex?view=msvc-160
+ * and observed the following results:
+ *
+ * AVX not supported
+ * AVX2 supported
+ *
+ * We don't know for sure what was up with those machines, but this extra
+ * check should stop them from running our AVX/AVX2 code paths. */
+ uint32_t avx1_mask = (1 << 28);
+ aws_run_cpuid(1, 0, abcd);
+ if ((abcd[2] & avx1_mask) != avx1_mask) {
return false;
+ }
return true;
}
diff --git a/contrib/restricted/aws/aws-c-common/source/bus.c b/contrib/restricted/aws/aws-c-common/source/bus.c
new file mode 100644
index 0000000000..68bb29deda
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/bus.c
@@ -0,0 +1,724 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include <aws/common/bus.h>
+
+#include <aws/common/allocator.h>
+#include <aws/common/atomics.h>
+#include <aws/common/byte_buf.h>
+#include <aws/common/condition_variable.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/linked_list.h>
+#include <aws/common/logging.h>
+#include <aws/common/mutex.h>
+#include <aws/common/thread.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable : 4204) /* nonstandard extension used: non-constant aggregate initializer */
+#endif
+
+struct aws_bus {
+ struct aws_allocator *allocator;
+
+ /* vtable and additional data structures for delivery policy */
+ void *impl;
+};
+
+/* MUST be the first member of any impl to allow blind casting */
+struct bus_vtable {
+ void (*clean_up)(struct aws_bus *bus);
+
+ int (*send)(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *));
+
+ int (*subscribe)(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *callback, void *user_data);
+
+ void (*unsubscribe)(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *callback, void *user_data);
+};
+
+/* each bound callback is stored as a bus_listener in the slots table */
+struct bus_listener {
+ struct aws_linked_list_node list_node;
+ void *user_data;
+ aws_bus_listener_fn *deliver;
+};
+
+/* value type stored in each slot in the slots table in a bus */
+struct listener_list {
+ struct aws_allocator *allocator;
+ struct aws_linked_list listeners;
+};
+
+/* find a listener list (or NULL) by address */
+static struct listener_list *bus_find_listeners(struct aws_hash_table *slots, uint64_t address) {
+ struct aws_hash_element *elem = NULL;
+ if (aws_hash_table_find(slots, (void *)(uintptr_t)address, &elem)) {
+ return NULL;
+ }
+
+ if (!elem) {
+ return NULL;
+ }
+
+ struct listener_list *list = elem->value;
+ return list;
+}
+
+/* find a listener list by address, or create/insert/return a new one */
+static struct listener_list *bus_find_or_create_listeners(
+ struct aws_allocator *allocator,
+ struct aws_hash_table *slots,
+ uint64_t address) {
+ struct listener_list *list = bus_find_listeners(slots, address);
+ if (list) {
+ return list;
+ }
+
+ list = aws_mem_calloc(allocator, 1, sizeof(struct listener_list));
+ list->allocator = allocator;
+ aws_linked_list_init(&list->listeners);
+ aws_hash_table_put(slots, (void *)(uintptr_t)address, list, NULL);
+ return list;
+}
+
+static void s_bus_deliver_msg_to_slot(
+ struct aws_bus *bus,
+ uint64_t slot,
+ uint64_t address,
+ struct aws_hash_table *slots,
+ const void *payload) {
+ (void)bus;
+ struct listener_list *list = bus_find_listeners(slots, slot);
+ if (!list) {
+ return;
+ }
+ struct aws_linked_list_node *node = aws_linked_list_begin(&list->listeners);
+ for (; node != aws_linked_list_end(&list->listeners); node = aws_linked_list_next(node)) {
+ struct bus_listener *listener = AWS_CONTAINER_OF(node, struct bus_listener, list_node);
+ listener->deliver(address, payload, listener->user_data);
+ }
+}
+
+/* common delivery logic */
+static void s_bus_deliver_msg(
+ struct aws_bus *bus,
+ uint64_t address,
+ struct aws_hash_table *slots,
+ const void *payload) {
+ s_bus_deliver_msg_to_slot(bus, AWS_BUS_ADDRESS_ALL, address, slots, payload);
+ s_bus_deliver_msg_to_slot(bus, address, address, slots, payload);
+}
+
+/* common subscribe logic */
+static int s_bus_subscribe(
+ struct aws_bus *bus,
+ uint64_t address,
+ struct aws_hash_table *slots,
+ aws_bus_listener_fn *callback,
+ void *user_data) {
+
+ if (address == AWS_BUS_ADDRESS_CLOSE) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_BUS, "Cannot directly subscribe to AWS_BUS_ADDRESS_CLOSE(0)");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct listener_list *list = bus_find_or_create_listeners(bus->allocator, slots, address);
+ struct bus_listener *listener = aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_listener));
+ listener->deliver = callback;
+ listener->user_data = user_data;
+ aws_linked_list_push_back(&list->listeners, &listener->list_node);
+
+ return AWS_OP_SUCCESS;
+}
+
+/* common unsubscribe logic */
+static void s_bus_unsubscribe(
+ struct aws_bus *bus,
+ uint64_t address,
+ struct aws_hash_table *slots,
+ aws_bus_listener_fn *callback,
+ void *user_data) {
+ (void)bus;
+
+ if (address == AWS_BUS_ADDRESS_CLOSE) {
+ AWS_LOGF_WARN(AWS_LS_COMMON_BUS, "Attempted to unsubscribe from invalid address AWS_BUS_ADDRESS_CLOSE")
+ return;
+ }
+
+ struct listener_list *list = bus_find_listeners(slots, address);
+ if (!list) {
+ return;
+ }
+
+ struct aws_linked_list_node *node;
+ for (node = aws_linked_list_begin(&list->listeners); node != aws_linked_list_end(&list->listeners);
+ node = aws_linked_list_next(node)) {
+
+ struct bus_listener *listener = AWS_CONTAINER_OF(node, struct bus_listener, list_node);
+ if (listener->deliver == callback && listener->user_data == user_data) {
+ aws_linked_list_remove(node);
+ aws_mem_release(list->allocator, listener);
+ return;
+ }
+ }
+}
+
+/* destructor for listener lists in the slots tables */
+void s_bus_destroy_listener_list(void *data) {
+ struct listener_list *list = data;
+ AWS_PRECONDITION(list->allocator);
+ /* call all listeners with an AWS_BUS_ADDRESS_CLOSE message type to clean up */
+ while (!aws_linked_list_empty(&list->listeners)) {
+ struct aws_linked_list_node *back = aws_linked_list_back(&list->listeners);
+ struct bus_listener *listener = AWS_CONTAINER_OF(back, struct bus_listener, list_node);
+ listener->deliver(AWS_BUS_ADDRESS_CLOSE, NULL, listener->user_data);
+ aws_linked_list_pop_back(&list->listeners);
+ aws_mem_release(list->allocator, listener);
+ }
+ aws_mem_release(list->allocator, list);
+}
+
+/*
+ * AWS_BUS_SYNC implementation
+ */
+struct bus_sync_impl {
+ struct bus_vtable vtable;
+ struct {
+ /* Map of address -> list of listeners */
+ struct aws_hash_table table;
+ } slots;
+};
+
+static void s_bus_sync_clean_up(struct aws_bus *bus) {
+ struct bus_sync_impl *impl = bus->impl;
+ aws_hash_table_clean_up(&impl->slots.table);
+ aws_mem_release(bus->allocator, impl);
+}
+
+static int s_bus_sync_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *)) {
+ struct bus_sync_impl *impl = bus->impl;
+ s_bus_deliver_msg(bus, address, &impl->slots.table, payload);
+ if (destructor) {
+ destructor(payload);
+ }
+ return AWS_OP_SUCCESS;
+}
+
+static int s_bus_sync_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *callback, void *user_data) {
+ struct bus_sync_impl *impl = bus->impl;
+ return s_bus_subscribe(bus, address, &impl->slots.table, callback, user_data);
+}
+
+static void s_bus_sync_unsubscribe(
+ struct aws_bus *bus,
+ uint64_t address,
+ aws_bus_listener_fn *callback,
+ void *user_data) {
+ struct bus_sync_impl *impl = bus->impl;
+ s_bus_unsubscribe(bus, address, &impl->slots.table, callback, user_data);
+}
+
+static struct bus_vtable bus_sync_vtable = {
+ .clean_up = s_bus_sync_clean_up,
+ .send = s_bus_sync_send,
+ .subscribe = s_bus_sync_subscribe,
+ .unsubscribe = s_bus_sync_unsubscribe,
+};
+
+static void s_bus_sync_init(struct aws_bus *bus, const struct aws_bus_options *options) {
+ (void)options;
+
+ struct bus_sync_impl *impl = bus->impl = aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_sync_impl));
+ impl->vtable = bus_sync_vtable;
+
+ if (aws_hash_table_init(
+ &impl->slots.table, bus->allocator, 8, aws_hash_ptr, aws_ptr_eq, NULL, s_bus_destroy_listener_list)) {
+ goto error;
+ }
+
+ return;
+
+error:
+ aws_mem_release(bus->allocator, impl);
+}
+
+/*
+ * AWS_BUS_ASYNC implementation
+ */
+struct bus_async_impl {
+ struct bus_vtable vtable;
+ struct {
+ /* Map of address -> list of listeners */
+ struct aws_hash_table table;
+ } slots;
+
+ /* Queue of bus_messages to deliver */
+ struct {
+ struct aws_mutex mutex;
+ /* backing memory for the message free list */
+ void *buffer;
+ void *buffer_end; /* 1 past the end of buffer */
+ /* message free list */
+ struct aws_linked_list free; /* struct bus_message */
+ /* message delivery queue */
+ struct aws_linked_list msgs; /* struct bus_message */
+ /* list of pending adds/removes of listeners */
+ struct aws_linked_list subs; /* struct pending_listener */
+ } queue;
+
+ /* dispatch thread */
+ struct {
+ struct aws_thread thread;
+ struct aws_condition_variable notify;
+ bool running;
+ struct aws_atomic_var started;
+ struct aws_atomic_var exited;
+ } dispatch;
+
+ bool reliable;
+};
+
+/* represents a message in the queue on impls that queue */
+struct bus_message {
+ struct aws_linked_list_node list_node;
+ uint64_t address;
+ void *payload;
+
+ void (*destructor)(void *);
+};
+
+struct pending_listener {
+ struct aws_linked_list_node list_node;
+ uint64_t address;
+ aws_bus_listener_fn *listener;
+ void *user_data;
+ uint32_t add : 1;
+ uint32_t remove : 1;
+};
+
+static void s_bus_message_clean_up(struct bus_message *msg) {
+ if (msg->destructor) {
+ msg->destructor(msg->payload);
+ }
+ msg->destructor = NULL;
+ msg->payload = NULL;
+}
+
+/* Assumes the caller holds the lock */
+static void s_bus_async_free_message(struct aws_bus *bus, struct bus_message *msg) {
+ struct bus_async_impl *impl = bus->impl;
+ s_bus_message_clean_up(msg);
+ if ((void *)msg >= impl->queue.buffer && (void *)msg < impl->queue.buffer_end) {
+ AWS_ZERO_STRUCT(*msg);
+ aws_linked_list_push_back(&impl->queue.free, &msg->list_node);
+ return;
+ }
+ aws_mem_release(bus->allocator, msg);
+}
+
+/* Assumes the caller holds the lock */
+struct bus_message *s_bus_async_alloc_message(struct aws_bus *bus) {
+ struct bus_async_impl *impl = bus->impl;
+
+ /* try the free list first */
+ if (!aws_linked_list_empty(&impl->queue.free)) {
+ struct aws_linked_list_node *msg_node = aws_linked_list_pop_back(&impl->queue.free);
+ struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node);
+ return msg;
+ }
+
+ /* unreliable will re-use the oldest message */
+ if (!impl->reliable) {
+ struct aws_linked_list_node *msg_node = aws_linked_list_pop_front(&impl->queue.msgs);
+ struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node);
+ s_bus_async_free_message(bus, msg);
+ return s_bus_async_alloc_message(bus);
+ }
+
+ return aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_message));
+}
+
+/*
+ * resolve all adds and removes of listeners, in FIFO order
+ * NOTE: expects mutex to be held by caller
+ */
+static void s_bus_apply_listeners(struct aws_bus *bus, struct aws_linked_list *pending_subs) {
+ struct bus_async_impl *impl = bus->impl;
+ while (!aws_linked_list_empty(pending_subs)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(pending_subs);
+ struct pending_listener *listener = AWS_CONTAINER_OF(node, struct pending_listener, list_node);
+ if (listener->add) {
+ s_bus_subscribe(bus, listener->address, &impl->slots.table, listener->listener, listener->user_data);
+ } else if (listener->remove) {
+ s_bus_unsubscribe(bus, listener->address, &impl->slots.table, listener->listener, listener->user_data);
+ }
+ aws_mem_release(bus->allocator, listener);
+ }
+}
+
+static void s_bus_async_deliver_messages(struct aws_bus *bus, struct aws_linked_list *pending_msgs) {
+ struct bus_async_impl *impl = bus->impl;
+ struct aws_linked_list_node *msg_node = aws_linked_list_begin(pending_msgs);
+ for (; msg_node != aws_linked_list_end(pending_msgs); msg_node = aws_linked_list_next(msg_node)) {
+ struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node);
+ s_bus_deliver_msg(bus, msg->address, &impl->slots.table, msg->payload);
+ s_bus_message_clean_up(msg);
+ }
+
+ /* push all pending messages back on the free list */
+ aws_mutex_lock(&impl->queue.mutex);
+ {
+ while (!aws_linked_list_empty(pending_msgs)) {
+ msg_node = aws_linked_list_pop_front(pending_msgs);
+ struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node);
+ s_bus_async_free_message(bus, msg);
+ }
+ }
+ aws_mutex_unlock(&impl->queue.mutex);
+}
+
+static void s_bus_async_clean_up(struct aws_bus *bus) {
+ struct bus_async_impl *impl = bus->impl;
+
+ /* shut down delivery thread, clean up dispatch */
+ AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus: %p clean_up: starting final drain", (void *)bus);
+ aws_mutex_lock(&impl->queue.mutex);
+ impl->dispatch.running = false;
+ aws_mutex_unlock(&impl->queue.mutex);
+ aws_condition_variable_notify_one(&impl->dispatch.notify);
+ /* Spin wait for the final drain and dispatch thread to complete */
+ while (!aws_atomic_load_int(&impl->dispatch.exited)) {
+ aws_thread_current_sleep(1000 * 1000); /* 1 microsecond */
+ }
+ AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus: %p clean_up: finished final drain", (void *)bus);
+ aws_thread_join(&impl->dispatch.thread);
+ aws_thread_clean_up(&impl->dispatch.thread);
+ aws_condition_variable_clean_up(&impl->dispatch.notify);
+
+ /* should be impossible for subs or msgs to remain after final drain */
+ AWS_FATAL_ASSERT(aws_linked_list_empty(&impl->queue.msgs));
+ AWS_FATAL_ASSERT(aws_linked_list_empty(&impl->queue.subs));
+
+ /* this frees everything that the free/msgs lists point to */
+ if (impl->queue.buffer) {
+ aws_mem_release(bus->allocator, impl->queue.buffer);
+ }
+
+ aws_mutex_clean_up(&impl->queue.mutex);
+
+ aws_hash_table_clean_up(&impl->slots.table);
+ aws_mem_release(bus->allocator, impl);
+}
+
+static bool s_bus_async_should_wake_up(void *user_data) {
+ struct bus_async_impl *impl = user_data;
+ return !impl->dispatch.running || !aws_linked_list_empty(&impl->queue.subs) ||
+ !aws_linked_list_empty(&impl->queue.msgs);
+}
+
+static bool s_bus_async_is_running(struct bus_async_impl *impl) {
+ aws_mutex_lock(&impl->queue.mutex);
+ bool running = impl->dispatch.running;
+ aws_mutex_unlock(&impl->queue.mutex);
+ return running;
+}
+
+/* Async bus delivery thread loop */
+static void s_bus_async_deliver(void *user_data) {
+ struct aws_bus *bus = user_data;
+ struct bus_async_impl *impl = bus->impl;
+
+ aws_atomic_store_int(&impl->dispatch.started, 1);
+ AWS_LOGF_DEBUG(AWS_LS_COMMON_BUS, "bus %p: delivery thread loop started", (void *)bus);
+
+ /* once shutdown has been triggered, need to drain one more time to ensure all queues are empty */
+ int pending_drains = 1;
+ do {
+ struct aws_linked_list pending_msgs;
+ aws_linked_list_init(&pending_msgs);
+
+ struct aws_linked_list pending_subs;
+ aws_linked_list_init(&pending_subs);
+
+ aws_mutex_lock(&impl->queue.mutex);
+ {
+ aws_condition_variable_wait_pred(
+ &impl->dispatch.notify, &impl->queue.mutex, s_bus_async_should_wake_up, impl);
+
+ /* copy out any queued subs/unsubs */
+ aws_linked_list_swap_contents(&impl->queue.subs, &pending_subs);
+ /* copy out any queued messages */
+ aws_linked_list_swap_contents(&impl->queue.msgs, &pending_msgs);
+ }
+ aws_mutex_unlock(&impl->queue.mutex);
+
+ /* first resolve subs/unsubs */
+ if (!aws_linked_list_empty(&pending_subs)) {
+ s_bus_apply_listeners(bus, &pending_subs);
+ }
+
+ /* Then deliver queued messages */
+ if (!aws_linked_list_empty(&pending_msgs)) {
+ s_bus_async_deliver_messages(bus, &pending_msgs);
+ }
+ } while (s_bus_async_is_running(impl) || pending_drains--);
+
+ /* record that the dispatch thread is done */
+ aws_atomic_store_int(&impl->dispatch.exited, 1);
+}
+
+int s_bus_async_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *)) {
+ struct bus_async_impl *impl = bus->impl;
+
+ aws_mutex_lock(&impl->queue.mutex);
+ {
+ if (!impl->dispatch.running) {
+ AWS_LOGF_WARN(
+ AWS_LS_COMMON_BUS, "bus %p: message sent after clean_up: address: %" PRIu64 "", (void *)bus, address);
+ aws_mutex_unlock(&impl->queue.mutex);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ struct bus_message *msg = s_bus_async_alloc_message(bus);
+ msg->address = address;
+ msg->payload = payload;
+ msg->destructor = destructor;
+
+ /* push the message onto the delivery queue */
+ aws_linked_list_push_back(&impl->queue.msgs, &msg->list_node);
+ }
+ aws_mutex_unlock(&impl->queue.mutex);
+
+ /* notify the delivery thread to wake up */
+ aws_condition_variable_notify_one(&impl->dispatch.notify);
+
+ return AWS_OP_SUCCESS;
+}
+
+int s_bus_async_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) {
+ struct bus_async_impl *impl = bus->impl;
+
+ if (address == AWS_BUS_ADDRESS_CLOSE) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_BUS, "Cannot subscribe to AWS_BUS_ADDRESS_CLOSE");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ aws_mutex_lock(&impl->queue.mutex);
+ {
+ if (!impl->dispatch.running) {
+ AWS_LOGF_WARN(
+ AWS_LS_COMMON_BUS,
+ "bus %p: subscribe requested after clean_up: address: %" PRIu64 "",
+ (void *)bus,
+ address);
+ aws_mutex_unlock(&impl->queue.mutex);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ struct pending_listener *sub = aws_mem_calloc(bus->allocator, 1, sizeof(struct pending_listener));
+ sub->address = address;
+ sub->listener = listener;
+ sub->user_data = user_data;
+ sub->add = true;
+ aws_linked_list_push_back(&impl->queue.subs, &sub->list_node);
+ }
+ aws_mutex_unlock(&impl->queue.mutex);
+
+ /* notify the delivery thread to wake up */
+ aws_condition_variable_notify_one(&impl->dispatch.notify);
+ return AWS_OP_SUCCESS;
+}
+
+void s_bus_async_unsubscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) {
+ struct bus_async_impl *impl = bus->impl;
+
+ if (address == AWS_BUS_ADDRESS_CLOSE) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_BUS, "Cannot unsubscribe from AWS_BUS_ADDRESS_CLOSE");
+ return;
+ }
+
+ aws_mutex_lock(&impl->queue.mutex);
+ {
+ if (!impl->dispatch.running) {
+ AWS_LOGF_WARN(
+ AWS_LS_COMMON_BUS,
+ "bus %p: unsubscribe requested after clean_up: address: %" PRIu64 "",
+ (void *)bus,
+ address);
+ aws_mutex_unlock(&impl->queue.mutex);
+ return;
+ }
+
+ struct pending_listener *unsub = aws_mem_calloc(bus->allocator, 1, sizeof(struct pending_listener));
+ unsub->address = address;
+ unsub->listener = listener;
+ unsub->user_data = user_data;
+ unsub->remove = true;
+ aws_linked_list_push_back(&impl->queue.subs, &unsub->list_node);
+ }
+ aws_mutex_unlock(&impl->queue.mutex);
+
+ /* notify the delivery thread to wake up */
+ aws_condition_variable_notify_one(&impl->dispatch.notify);
+}
+
+static struct bus_vtable bus_async_vtable = {
+ .clean_up = s_bus_async_clean_up,
+ .send = s_bus_async_send,
+ .subscribe = s_bus_async_subscribe,
+ .unsubscribe = s_bus_async_unsubscribe,
+};
+
+static void s_bus_async_init(struct aws_bus *bus, const struct aws_bus_options *options) {
+ struct bus_async_impl *impl = bus->impl = aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_async_impl));
+ impl->vtable = bus_async_vtable;
+ impl->reliable = (options->policy == AWS_BUS_ASYNC_RELIABLE);
+
+ /* init msg queue */
+ if (aws_mutex_init(&impl->queue.mutex)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_BUS,
+ "bus %p: Unable to initialize queue synchronization: %s",
+ (void *)bus,
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+ aws_linked_list_init(&impl->queue.msgs);
+ aws_linked_list_init(&impl->queue.free);
+ aws_linked_list_init(&impl->queue.subs);
+
+ /* push as many bus_messages as we can into the free list from the buffer */
+ if (options->buffer_size) {
+ impl->queue.buffer = aws_mem_calloc(bus->allocator, 1, options->buffer_size);
+ impl->queue.buffer_end = ((uint8_t *)impl->queue.buffer) + options->buffer_size;
+ const int msg_count = (int)(options->buffer_size / sizeof(struct bus_message));
+ for (int msg_idx = 0; msg_idx < msg_count; ++msg_idx) {
+ struct bus_message *msg = (void *)&((char *)impl->queue.buffer)[msg_idx * sizeof(struct bus_message)];
+ aws_linked_list_push_back(&impl->queue.free, &msg->list_node);
+ }
+ }
+
+ /* init subscription table */
+ if (aws_hash_table_init(
+ &impl->slots.table, bus->allocator, 8, aws_hash_ptr, aws_ptr_eq, NULL, s_bus_destroy_listener_list)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_BUS,
+ "bus %p: Unable to initialize bus addressing table: %s",
+ (void *)bus,
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Setup dispatch thread */
+ if (aws_condition_variable_init(&impl->dispatch.notify)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_BUS,
+ "bus %p: Unable to initialize async notify: %s",
+ (void *)bus,
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ if (aws_thread_init(&impl->dispatch.thread, bus->allocator)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_BUS,
+ "bus %p: Unable to initialize background thread: %s",
+ (void *)bus,
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ impl->dispatch.running = true;
+ aws_atomic_init_int(&impl->dispatch.started, 0);
+ aws_atomic_init_int(&impl->dispatch.exited, 0);
+ if (aws_thread_launch(&impl->dispatch.thread, s_bus_async_deliver, bus, aws_default_thread_options())) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_BUS,
+ "bus %p: Unable to launch delivery thread: %s",
+ (void *)bus,
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* wait for dispatch thread to start before returning control */
+ AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus %p: Waiting for delivery thread to start", (void *)bus);
+ while (!aws_atomic_load_int(&impl->dispatch.started)) {
+ aws_thread_current_sleep(1000 * 1000);
+ }
+ AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus %p: Delivery thread started", (void *)bus);
+
+ return;
+
+error:
+ aws_thread_clean_up(&impl->dispatch.thread);
+ aws_condition_variable_clean_up(&impl->dispatch.notify);
+ aws_hash_table_clean_up(&impl->slots.table);
+ aws_mem_release(bus->allocator, &impl->queue.buffer);
+ aws_mutex_clean_up(&impl->queue.mutex);
+ aws_mem_release(bus->allocator, impl);
+ bus->impl = NULL;
+}
+
+/*
+ * Public API
+ */
+struct aws_bus *aws_bus_new(struct aws_allocator *allocator, const struct aws_bus_options *options) {
+ struct aws_bus *bus = aws_mem_calloc(allocator, 1, sizeof(struct aws_bus));
+ bus->allocator = allocator;
+
+ switch (options->policy) {
+ case AWS_BUS_ASYNC_RELIABLE:
+ case AWS_BUS_ASYNC_UNRELIABLE:
+ s_bus_async_init(bus, options);
+ break;
+ case AWS_BUS_SYNC_RELIABLE:
+ s_bus_sync_init(bus, options);
+ break;
+ }
+
+ if (!bus->impl) {
+ aws_mem_release(allocator, bus);
+ return NULL;
+ }
+
+ return bus;
+}
+
+void aws_bus_destroy(struct aws_bus *bus) {
+ struct bus_vtable *vtable = bus->impl;
+ vtable->clean_up(bus);
+ aws_mem_release(bus->allocator, bus);
+}
+
+int aws_bus_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) {
+ struct bus_vtable *vtable = bus->impl;
+ return vtable->subscribe(bus, address, listener, user_data);
+}
+
+void aws_bus_unsubscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) {
+ struct bus_vtable *vtable = bus->impl;
+ vtable->unsubscribe(bus, address, listener, user_data);
+}
+
+int aws_bus_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *)) {
+ struct bus_vtable *vtable = bus->impl;
+ return vtable->send(bus, address, payload, destructor);
+}
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
diff --git a/contrib/restricted/aws/aws-c-common/source/byte_buf.c b/contrib/restricted/aws/aws-c-common/source/byte_buf.c
index ca18f4121b..f52aa16b45 100644
--- a/contrib/restricted/aws/aws-c-common/source/byte_buf.c
+++ b/contrib/restricted/aws/aws-c-common/source/byte_buf.c
@@ -496,7 +496,7 @@ uint64_t aws_hash_array_ignore_case(const void *array, const size_t len) {
const uint64_t fnv_prime = 0x100000001b3ULL;
const uint8_t *i = array;
- const uint8_t *end = i + len;
+ const uint8_t *end = (i == NULL) ? NULL : (i + len);
uint64_t hash = fnv_offset_basis;
while (i != end) {
@@ -558,6 +558,42 @@ bool aws_byte_cursor_eq_c_str_ignore_case(const struct aws_byte_cursor *const cu
return rv;
}
+bool aws_byte_cursor_starts_with(const struct aws_byte_cursor *input, const struct aws_byte_cursor *prefix) {
+
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(input));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(prefix));
+
+ if (input->len < prefix->len) {
+ return false;
+ }
+
+ struct aws_byte_cursor start = {.ptr = input->ptr, .len = prefix->len};
+ bool rv = aws_byte_cursor_eq(&start, prefix);
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(input));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(prefix));
+ return rv;
+}
+
+bool aws_byte_cursor_starts_with_ignore_case(
+ const struct aws_byte_cursor *input,
+ const struct aws_byte_cursor *prefix) {
+
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(input));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(prefix));
+
+ if (input->len < prefix->len) {
+ return false;
+ }
+
+ struct aws_byte_cursor start = {.ptr = input->ptr, .len = prefix->len};
+ bool rv = aws_byte_cursor_eq_ignore_case(&start, prefix);
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(input));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(prefix));
+ return rv;
+}
+
int aws_byte_buf_append(struct aws_byte_buf *to, const struct aws_byte_cursor *from) {
AWS_PRECONDITION(aws_byte_buf_is_valid(to));
AWS_PRECONDITION(aws_byte_cursor_is_valid(from));
@@ -750,7 +786,13 @@ int aws_byte_buf_reserve(struct aws_byte_buf *buffer, size_t requested_capacity)
AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer));
return AWS_OP_SUCCESS;
}
-
+ if (!buffer->buffer && !buffer->capacity && requested_capacity > buffer->capacity) {
+ if (aws_byte_buf_init(buffer, buffer->allocator, requested_capacity)) {
+ return AWS_OP_ERR;
+ }
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer));
+ return AWS_OP_SUCCESS;
+ }
if (aws_mem_realloc(buffer->allocator, (void **)&buffer->buffer, buffer->capacity, requested_capacity)) {
return AWS_OP_ERR;
}
@@ -857,6 +899,13 @@ int aws_byte_cursor_compare_lookup(
AWS_PRECONDITION(aws_byte_cursor_is_valid(lhs));
AWS_PRECONDITION(aws_byte_cursor_is_valid(rhs));
AWS_PRECONDITION(AWS_MEM_IS_READABLE(lookup_table, 256));
+ if (lhs->len == 0 && rhs->len == 0) {
+ return 0;
+ } else if (lhs->len == 0) {
+ return -1;
+ } else if (rhs->len == 0) {
+ return 1;
+ }
const uint8_t *lhs_curr = lhs->ptr;
const uint8_t *lhs_end = lhs_curr + lhs->len;
@@ -1047,8 +1096,7 @@ struct aws_byte_cursor aws_byte_cursor_advance(struct aws_byte_cursor *const cur
} else {
rv.ptr = cursor->ptr;
rv.len = len;
-
- cursor->ptr += len;
+ cursor->ptr = (cursor->ptr == NULL) ? NULL : cursor->ptr + len;
cursor->len -= len;
}
AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor));
@@ -1089,7 +1137,7 @@ struct aws_byte_cursor aws_byte_cursor_advance_nospec(struct aws_byte_cursor *co
/* Make sure anything acting upon the returned cursor _also_ doesn't advance past NULL */
rv.len = len & mask;
- cursor->ptr += len;
+ cursor->ptr = (cursor->ptr == NULL) ? NULL : cursor->ptr + len;
cursor->len -= len;
} else {
rv.ptr = NULL;
@@ -1371,7 +1419,7 @@ bool aws_byte_buf_advance(
AWS_PRECONDITION(aws_byte_buf_is_valid(buffer));
AWS_PRECONDITION(aws_byte_buf_is_valid(output));
if (buffer->capacity - buffer->len >= len) {
- *output = aws_byte_buf_from_array(buffer->buffer + buffer->len, len);
+ *output = aws_byte_buf_from_array((buffer->buffer == NULL) ? NULL : buffer->buffer + buffer->len, len);
buffer->len += len;
output->len = 0;
AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer));
@@ -1611,18 +1659,52 @@ bool aws_isxdigit(uint8_t ch) {
bool aws_isspace(uint8_t ch) {
switch (ch) {
case 0x20: /* ' ' - space */
- return true;
case 0x09: /* '\t' - horizontal tab */
- return true;
case 0x0A: /* '\n' - line feed */
- return true;
case 0x0B: /* '\v' - vertical tab */
- return true;
case 0x0C: /* '\f' - form feed */
- return true;
case 0x0D: /* '\r' - carriage return */
return true;
default:
return false;
}
}
+
+static int s_read_unsigned(struct aws_byte_cursor cursor, uint64_t *dst, uint8_t base) {
+ uint64_t val = 0;
+ *dst = 0;
+
+ if (cursor.len == 0) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ const uint8_t *hex_to_num_table = aws_lookup_table_hex_to_num_get();
+
+ /* read from left to right */
+ for (size_t i = 0; i < cursor.len; ++i) {
+ const uint8_t c = cursor.ptr[i];
+ const uint8_t cval = hex_to_num_table[c];
+ if (cval >= base) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (aws_mul_u64_checked(val, base, &val)) {
+ return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ }
+
+ if (aws_add_u64_checked(val, cval, &val)) {
+ return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ }
+ }
+
+ *dst = val;
+ return AWS_OP_SUCCESS;
+}
+
+int aws_byte_cursor_utf8_parse_u64(struct aws_byte_cursor cursor, uint64_t *dst) {
+ return s_read_unsigned(cursor, dst, 10 /*base*/);
+}
+
+int aws_byte_cursor_utf8_parse_u64_hex(struct aws_byte_cursor cursor, uint64_t *dst) {
+ return s_read_unsigned(cursor, dst, 16 /*base*/);
+}
diff --git a/contrib/restricted/aws/aws-c-common/source/command_line_parser.c b/contrib/restricted/aws/aws-c-common/source/command_line_parser.c
index ccbe6d1820..bf2db81e0a 100644
--- a/contrib/restricted/aws/aws-c-common/source/command_line_parser.c
+++ b/contrib/restricted/aws/aws-c-common/source/command_line_parser.c
@@ -2,13 +2,18 @@
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
+#include <aws/common/byte_buf.h>
#include <aws/common/command_line_parser.h>
+#include <ctype.h>
+
int aws_cli_optind = 1;
int aws_cli_opterr = -1;
int aws_cli_optopt = 0;
+bool aws_cli_on_arg = false;
const char *aws_cli_optarg = NULL;
+const char *aws_cli_positional_arg = NULL;
static const struct aws_cli_option *s_find_option_from_char(
const struct aws_cli_option *longopts,
@@ -31,6 +36,16 @@ static const struct aws_cli_option *s_find_option_from_char(
return NULL;
}
+AWS_COMMON_API void aws_cli_reset_state(void) {
+ aws_cli_optind = 1;
+ aws_cli_opterr = -1;
+ aws_cli_optopt = 0;
+ aws_cli_on_arg = false;
+
+ aws_cli_optarg = NULL;
+ aws_cli_positional_arg = NULL;
+}
+
static const struct aws_cli_option *s_find_option_from_c_str(
const struct aws_cli_option *longopts,
const char *search_for,
@@ -70,22 +85,35 @@ int aws_cli_getopt_long(
char second_char = argv[aws_cli_optind][1];
char *option_start = NULL;
const struct aws_cli_option *option = NULL;
+ bool positional_arg_encountered = false;
if (first_char == '-' && second_char != '-') {
+ aws_cli_on_arg = true;
+ positional_arg_encountered = false;
option_start = &argv[aws_cli_optind][1];
option = s_find_option_from_char(longopts, *option_start, longindex);
} else if (first_char == '-' && second_char == '-') {
+ aws_cli_on_arg = true;
+ positional_arg_encountered = false;
option_start = &argv[aws_cli_optind][2];
option = s_find_option_from_c_str(longopts, option_start, longindex);
} else {
- return -1;
+ if (!aws_cli_on_arg) {
+ aws_cli_positional_arg = argv[aws_cli_optind];
+ positional_arg_encountered = true;
+ } else {
+ aws_cli_on_arg = false;
+ aws_cli_positional_arg = NULL;
+ }
}
aws_cli_optind++;
if (option) {
bool has_arg = false;
+ aws_cli_on_arg = false;
+ aws_cli_positional_arg = NULL;
- char *opt_value = memchr(optstring, option->val, strlen(optstring));
+ char *opt_value = memchr(optstring, option->val, strlen(optstring) + 1);
if (!opt_value) {
return '?';
}
@@ -105,5 +133,28 @@ int aws_cli_getopt_long(
return option->val;
}
- return '?';
+ /* start of text to indicate we just have a text argument. */
+ return positional_arg_encountered ? 0x02 : '?';
+}
+
+int aws_cli_dispatch_on_subcommand(
+ int argc,
+ char *const argv[],
+ struct aws_cli_subcommand_dispatch *dispatch_table,
+ int table_length,
+ void *user_data) {
+ if (argc >= 2) {
+ struct aws_byte_cursor arg_name = aws_byte_cursor_from_c_str(argv[1]);
+ for (int i = 0; i < table_length; ++i) {
+ struct aws_byte_cursor cmd_name = aws_byte_cursor_from_c_str(dispatch_table[i].command_name);
+
+ if (aws_byte_cursor_eq_ignore_case(&arg_name, &cmd_name)) {
+ return dispatch_table[i].subcommand_fn(argc - 1, &argv[1], (const char *)arg_name.ptr, user_data);
+ }
+ }
+
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ }
+
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
diff --git a/contrib/restricted/aws/aws-c-common/source/common.c b/contrib/restricted/aws/aws-c-common/source/common.c
index 88c5d262c8..83a91b768f 100644
--- a/contrib/restricted/aws/aws-c-common/source/common.c
+++ b/contrib/restricted/aws/aws-c-common/source/common.c
@@ -4,9 +4,11 @@
*/
#include <aws/common/common.h>
+#include <aws/common/json.h>
#include <aws/common/logging.h>
#include <aws/common/math.h>
#include <aws/common/private/dlloads.h>
+#include <aws/common/private/thread_shared.h>
#include <stdarg.h>
#include <stdlib.h>
@@ -28,6 +30,11 @@
#endif
long (*g_set_mempolicy_ptr)(int, const unsigned long *, unsigned long) = NULL;
+int (*g_numa_available_ptr)(void) = NULL;
+int (*g_numa_num_configured_nodes_ptr)(void) = NULL;
+int (*g_numa_num_possible_cpus_ptr)(void) = NULL;
+int (*g_numa_node_of_cpu_ptr)(int cpu) = NULL;
+
void *g_libnuma_handle = NULL;
void aws_secure_zero(void *pBuf, size_t bufsize) {
@@ -77,6 +84,9 @@ static struct aws_error_info errors[] = {
AWS_ERROR_OOM,
"Out of memory."),
AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_NO_SPACE,
+ "Out of space on disk."),
+ AWS_DEFINE_ERROR_INFO_COMMON(
AWS_ERROR_UNKNOWN,
"Unknown error."),
AWS_DEFINE_ERROR_INFO_COMMON(
@@ -226,6 +236,20 @@ static struct aws_error_info errors[] = {
AWS_DEFINE_ERROR_INFO_COMMON(
AWS_ERROR_DIVIDE_BY_ZERO,
"Attempt to divide a number by zero."),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_INVALID_FILE_HANDLE,
+ "Invalid file handle"),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_OPERATION_INTERUPTED,
+ "The operation was interrupted."
+ ),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_DIRECTORY_NOT_EMPTY,
+ "An operation on a directory was attempted which is not allowed when the directory is not empty."
+ ),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_PLATFORM_NOT_SUPPORTED,
+ "Feature not supported on this platform"),
};
/* clang-format on */
@@ -244,8 +268,11 @@ static struct aws_log_subject_info s_common_log_subject_infos[] = {
"task-scheduler",
"Subject for task scheduler or task specific logging."),
DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_THREAD, "thread", "Subject for logging thread related functions."),
- DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_XML_PARSER, "xml-parser", "Subject for xml parser specific logging."),
DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_MEMTRACE, "memtrace", "Output from the aws_mem_trace_dump function"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_XML_PARSER, "xml-parser", "Subject for xml parser specific logging."),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_IO, "common-io", "Common IO utilities"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_BUS, "bus", "Message bus"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_TEST, "test", "Unit/integration testing"),
};
static struct aws_log_subject_info_list s_common_log_subject_list = {
@@ -262,11 +289,26 @@ void aws_common_library_init(struct aws_allocator *allocator) {
s_common_library_initialized = true;
aws_register_error_info(&s_list);
aws_register_log_subject_info_list(&s_common_log_subject_list);
+ aws_thread_initialize_thread_management();
+ aws_json_module_init(allocator);
/* NUMA is funky and we can't rely on libnuma.so being available. We also don't want to take a hard dependency on it,
* try and load it if we can. */
#if !defined(_WIN32) && !defined(WIN32)
- g_libnuma_handle = dlopen("libnuma.so", RTLD_NOW);
+ /* libnuma defines set_mempolicy() as a WEAK symbol. Loading into the global symbol table overwrites symbols and
+ assumptions due to the way loaders and dlopen are often implemented and those symbols are defined by things
+ like libpthread.so on some unix distros. Sorry about the memory usage here, but it's our only safe choice.
+ Also, please don't do numa configurations if memory is your economic bottleneck. */
+ g_libnuma_handle = dlopen("libnuma.so", RTLD_LOCAL);
+
+ /* turns out so versioning is really inconsistent these days */
+ if (!g_libnuma_handle) {
+ g_libnuma_handle = dlopen("libnuma.so.1", RTLD_LOCAL);
+ }
+
+ if (!g_libnuma_handle) {
+ g_libnuma_handle = dlopen("libnuma.so.2", RTLD_LOCAL);
+ }
if (g_libnuma_handle) {
AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: libnuma.so loaded");
@@ -276,6 +318,35 @@ void aws_common_library_init(struct aws_allocator *allocator) {
} else {
AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: set_mempolicy() failed to load");
}
+
+ *(void **)(&g_numa_available_ptr) = dlsym(g_libnuma_handle, "numa_available");
+ if (g_numa_available_ptr) {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_available() loaded");
+ } else {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_available() failed to load");
+ }
+
+ *(void **)(&g_numa_num_configured_nodes_ptr) = dlsym(g_libnuma_handle, "numa_num_configured_nodes");
+ if (g_numa_num_configured_nodes_ptr) {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_num_configured_nodes() loaded");
+ } else {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_num_configured_nodes() failed to load");
+ }
+
+ *(void **)(&g_numa_num_possible_cpus_ptr) = dlsym(g_libnuma_handle, "numa_num_possible_cpus");
+ if (g_numa_num_possible_cpus_ptr) {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_num_possible_cpus() loaded");
+ } else {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_num_possible_cpus() failed to load");
+ }
+
+ *(void **)(&g_numa_node_of_cpu_ptr) = dlsym(g_libnuma_handle, "numa_node_of_cpu");
+ if (g_numa_node_of_cpu_ptr) {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_node_of_cpu() loaded");
+ } else {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_node_of_cpu() failed to load");
+ }
+
} else {
AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: libnuma.so failed to load");
}
@@ -286,8 +357,10 @@ void aws_common_library_init(struct aws_allocator *allocator) {
void aws_common_library_clean_up(void) {
if (s_common_library_initialized) {
s_common_library_initialized = false;
+ aws_thread_join_all_managed();
aws_unregister_error_info(&s_list);
aws_unregister_log_subject_info_list(&s_common_log_subject_list);
+ aws_json_module_cleanup();
#if !defined(_WIN32) && !defined(WIN32)
if (g_libnuma_handle) {
dlclose(g_libnuma_handle);
diff --git a/contrib/restricted/aws/aws-c-common/source/date_time.c b/contrib/restricted/aws/aws-c-common/source/date_time.c
index 8d08e57ad8..77ec6ae0c1 100644
--- a/contrib/restricted/aws/aws-c-common/source/date_time.c
+++ b/contrib/restricted/aws/aws-c-common/source/date_time.c
@@ -12,6 +12,7 @@
#include <aws/common/time.h>
#include <ctype.h>
+#include <math.h>
static const char *RFC822_DATE_FORMAT_STR_MINUS_Z = "%a, %d %b %Y %H:%M:%S GMT";
static const char *RFC822_DATE_FORMAT_STR_WITH_Z = "%a, %d %b %Y %H:%M:%S %Z";
@@ -22,8 +23,8 @@ static const char *ISO_8601_LONG_BASIC_DATE_FORMAT_STR = "%Y%m%dT%H%M%SZ";
static const char *ISO_8601_SHORT_BASIC_DATE_FORMAT_STR = "%Y%m%d";
#define STR_TRIPLET_TO_INDEX(str) \
- (((uint32_t)(uint8_t)tolower((str)[0]) << 0) | ((uint32_t)(uint8_t)tolower((str)[1]) << 8) | \
- ((uint32_t)(uint8_t)tolower((str)[2]) << 16))
+ (((uint32_t)tolower((uint8_t)((str)[0])) << 0) | ((uint32_t)tolower((uint8_t)((str)[1])) << 8) | \
+ ((uint32_t)tolower((uint8_t)((str)[2])) << 16))
static uint32_t s_jan = 0;
static uint32_t s_feb = 0;
@@ -140,7 +141,7 @@ static bool is_utc_time_zone(const char *str) {
}
if (len == 2) {
- return tolower(str[0]) == 'u' && tolower(str[1]) == 't';
+ return tolower((uint8_t)str[0]) == 'u' && tolower((uint8_t)str[1]) == 't';
}
if (len < 3) {
@@ -170,21 +171,25 @@ struct tm s_get_time_struct(struct aws_date_time *dt, bool local_time) {
}
void aws_date_time_init_now(struct aws_date_time *dt) {
- uint64_t current_time = 0;
- aws_sys_clock_get_ticks(&current_time);
- dt->timestamp = (time_t)aws_timestamp_convert(current_time, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL);
- dt->gmt_time = s_get_time_struct(dt, false);
- dt->local_time = s_get_time_struct(dt, true);
+ uint64_t current_time_ns = 0;
+ aws_sys_clock_get_ticks(&current_time_ns);
+ aws_date_time_init_epoch_millis(
+ dt, aws_timestamp_convert(current_time_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL));
}
void aws_date_time_init_epoch_millis(struct aws_date_time *dt, uint64_t ms_since_epoch) {
- dt->timestamp = (time_t)(ms_since_epoch / AWS_TIMESTAMP_MILLIS);
+ uint64_t milliseconds = 0;
+ dt->timestamp =
+ (time_t)aws_timestamp_convert(ms_since_epoch, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_SECS, &milliseconds);
+ dt->milliseconds = (uint16_t)milliseconds;
dt->gmt_time = s_get_time_struct(dt, false);
dt->local_time = s_get_time_struct(dt, true);
}
void aws_date_time_init_epoch_secs(struct aws_date_time *dt, double sec_ms) {
- dt->timestamp = (time_t)sec_ms;
+ double integral = 0;
+ dt->milliseconds = (uint16_t)(round(modf(sec_ms, &integral) * AWS_TIMESTAMP_MILLIS));
+ dt->timestamp = (time_t)integral;
dt->gmt_time = s_get_time_struct(dt, false);
dt->local_time = s_get_time_struct(dt, true);
}
@@ -629,6 +634,7 @@ int aws_date_time_init_from_str_cursor(
* timestamp. */
dt->timestamp -= seconds_offset;
+ dt->milliseconds = 0U;
dt->gmt_time = s_get_time_struct(dt, false);
dt->local_time = s_get_time_struct(dt, true);
@@ -743,15 +749,17 @@ int aws_date_time_to_utc_time_short_str(
}
double aws_date_time_as_epoch_secs(const struct aws_date_time *dt) {
- return (double)dt->timestamp;
+ return (double)dt->timestamp + (double)(dt->milliseconds / 1000.0);
}
uint64_t aws_date_time_as_nanos(const struct aws_date_time *dt) {
- return (uint64_t)dt->timestamp * AWS_TIMESTAMP_NANOS;
+ return aws_timestamp_convert((uint64_t)dt->timestamp, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL) +
+ aws_timestamp_convert((uint64_t)dt->milliseconds, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
}
uint64_t aws_date_time_as_millis(const struct aws_date_time *dt) {
- return (uint64_t)dt->timestamp * AWS_TIMESTAMP_MILLIS;
+ return aws_timestamp_convert((uint64_t)dt->timestamp, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL) +
+ (uint64_t)dt->milliseconds;
}
uint16_t aws_date_time_year(const struct aws_date_time *dt, bool local_time) {
diff --git a/contrib/restricted/aws/aws-c-common/source/error.c b/contrib/restricted/aws/aws-c-common/source/error.c
index 60e6c9e799..bdd4dfcd67 100644
--- a/contrib/restricted/aws/aws-c-common/source/error.c
+++ b/contrib/restricted/aws/aws-c-common/source/error.c
@@ -198,6 +198,8 @@ int aws_translate_and_raise_io_error(int error_no) {
return aws_raise_error(AWS_ERROR_MAX_FDS_EXCEEDED);
case ENOMEM:
return aws_raise_error(AWS_ERROR_OOM);
+ case ENOSPC:
+ return aws_raise_error(AWS_ERROR_NO_SPACE);
default:
return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
}
diff --git a/contrib/restricted/aws/aws-c-common/source/external/cJSON.c b/contrib/restricted/aws/aws-c-common/source/external/cJSON.c
new file mode 100644
index 0000000000..8dd79bf1ec
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/external/cJSON.c
@@ -0,0 +1,3113 @@
+/*
+Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+/* cJSON */
+/* JSON parser in C. */
+
+/* disable warnings about old C89 functions in MSVC */
+#if !defined(_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER)
+#define _CRT_SECURE_NO_DEPRECATE
+#endif
+
+#ifdef __GNUC__
+#pragma GCC visibility push(default)
+#endif
+#if defined(_MSC_VER)
+#pragma warning (push)
+/* disable warning about single line comments in system headers */
+#pragma warning (disable : 4001)
+#endif
+
+#include <string.h>
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <ctype.h>
+#include <float.h>
+
+#ifdef ENABLE_LOCALES
+#include <locale.h>
+#endif
+
+#if defined(_MSC_VER)
+#pragma warning (pop)
+#endif
+#ifdef __GNUC__
+#pragma GCC visibility pop
+#endif
+
+#include <aws/common/external/cJSON.h>
+
+/* define our own boolean type */
+#ifdef true
+#undef true
+#endif
+#define true ((cJSON_bool)1)
+
+#ifdef false
+#undef false
+#endif
+#define false ((cJSON_bool)0)
+
+/* define isnan and isinf for ANSI C, if in C99 or above, isnan and isinf has been defined in math.h */
+#ifndef isinf
+#define isinf(d) (isnan((d - d)) && !isnan(d))
+#endif
+#ifndef isnan
+#define isnan(d) (d != d)
+#endif
+
+#ifndef NAN
+#ifdef _WIN32
+#define NAN sqrt(-1.0)
+#else
+#define NAN 0.0/0.0
+#endif
+#endif
+
+typedef struct {
+ const unsigned char *json;
+ size_t position;
+} error;
+static error global_error = { NULL, 0 };
+
+CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void)
+{
+ return (const char*) (global_error.json + global_error.position);
+}
+
+CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item)
+{
+ if (!cJSON_IsString(item))
+ {
+ return NULL;
+ }
+
+ return item->valuestring;
+}
+
+CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item)
+{
+ if (!cJSON_IsNumber(item))
+ {
+ return (double) NAN;
+ }
+
+ return item->valuedouble;
+}
+
+/* This is a safeguard to prevent copy-pasters from using incompatible C and header files */
+#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || (CJSON_VERSION_PATCH != 15)
+ #error cJSON.h and cJSON.c have different versions. Make sure that both have the same.
+#endif
+
+CJSON_PUBLIC(const char*) cJSON_Version(void)
+{
+ static char version[15];
+ snprintf(version, sizeof(version) / sizeof(char), "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH);
+
+ return version;
+}
+
+/* Case insensitive string comparison, doesn't consider two NULL pointers equal though */
+static int case_insensitive_strcmp(const unsigned char *string1, const unsigned char *string2)
+{
+ if ((string1 == NULL) || (string2 == NULL))
+ {
+ return 1;
+ }
+
+ if (string1 == string2)
+ {
+ return 0;
+ }
+
+ for(; tolower(*string1) == tolower(*string2); (void)string1++, string2++)
+ {
+ if (*string1 == '\0')
+ {
+ return 0;
+ }
+ }
+
+ return tolower(*string1) - tolower(*string2);
+}
+
+typedef struct internal_hooks
+{
+ void *(CJSON_CDECL *allocate)(size_t size);
+ void (CJSON_CDECL *deallocate)(void *pointer);
+ void *(CJSON_CDECL *reallocate)(void *pointer, size_t size);
+} internal_hooks;
+
+#if defined(_MSC_VER)
+/* work around MSVC error C2322: '...' address of dllimport '...' is not static */
+static void * CJSON_CDECL internal_malloc(size_t size)
+{
+ return malloc(size);
+}
+static void CJSON_CDECL internal_free(void *pointer)
+{
+ free(pointer);
+}
+static void * CJSON_CDECL internal_realloc(void *pointer, size_t size)
+{
+ return realloc(pointer, size);
+}
+#else
+#define internal_malloc malloc
+#define internal_free free
+#define internal_realloc realloc
+#endif
+
+/* strlen of character literals resolved at compile time */
+#define static_strlen(string_literal) (sizeof(string_literal) - sizeof(""))
+
+static internal_hooks global_hooks = { internal_malloc, internal_free, internal_realloc }; // NOLINT
+
+static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks) // NOLINT
+{
+ size_t length = 0;
+ unsigned char *copy = NULL;
+
+ if (string == NULL)
+ {
+ return NULL;
+ }
+
+ length = strlen((const char*)string) + sizeof("");
+ copy = (unsigned char*)hooks->allocate(length);
+ if (copy == NULL)
+ {
+ return NULL;
+ }
+ memcpy(copy, string, length);
+
+ return copy;
+}
+
+CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks) // NOLINT
+{
+ if (hooks == NULL)
+ {
+ /* Reset hooks */
+ global_hooks.allocate = malloc;
+ global_hooks.deallocate = free;
+ global_hooks.reallocate = realloc;
+ return;
+ }
+
+ global_hooks.allocate = malloc;
+ if (hooks->malloc_fn != NULL)
+ {
+ global_hooks.allocate = hooks->malloc_fn;
+ }
+
+ global_hooks.deallocate = free;
+ if (hooks->free_fn != NULL)
+ {
+ global_hooks.deallocate = hooks->free_fn;
+ }
+
+ /* use realloc only if both free and malloc are used */
+ global_hooks.reallocate = NULL;
+ if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free))
+ {
+ global_hooks.reallocate = realloc;
+ }
+}
+
+/* Internal constructor. */
+static cJSON *cJSON_New_Item(const internal_hooks * const hooks)
+{
+ cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON));
+ if (node)
+ {
+ memset(node, '\0', sizeof(cJSON));
+ }
+
+ return node;
+}
+
+/* Delete a cJSON structure. */
+CJSON_PUBLIC(void) cJSON_Delete(cJSON *item)
+{
+ cJSON *next = NULL;
+ while (item != NULL)
+ {
+ next = item->next;
+ if (!(item->type & cJSON_IsReference) && (item->child != NULL))
+ {
+ cJSON_Delete(item->child);
+ }
+ if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL))
+ {
+ global_hooks.deallocate(item->valuestring);
+ }
+ if (!(item->type & cJSON_StringIsConst) && (item->string != NULL))
+ {
+ global_hooks.deallocate(item->string);
+ }
+ global_hooks.deallocate(item);
+ item = next;
+ }
+}
+
+/* get the decimal point character of the current locale */
+static unsigned char get_decimal_point(void)
+{
+#ifdef ENABLE_LOCALES
+ struct lconv *lconv = localeconv();
+ return (unsigned char) lconv->decimal_point[0];
+#else
+ return '.';
+#endif
+}
+
+typedef struct
+{
+ const unsigned char *content;
+ size_t length;
+ size_t offset;
+ size_t depth; /* How deeply nested (in arrays/objects) is the input at the current offset. */
+ internal_hooks hooks;
+} parse_buffer;
+
+/* check if the given size is left to read in a given parse buffer (starting with 1) */
+#define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) // NOLINT
+/* check if the buffer can be accessed at the given index (starting with 0) */
+#define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) // NOLINT
+#define cannot_access_at_index(buffer, index) (!can_access_at_index(buffer, index))
+/* get a pointer to the buffer at the position */
+#define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset)
+
+/* Parse the input text to generate a number, and populate the result into item. */
+static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer) // NOLINT
+{
+ double number = 0;
+ unsigned char *after_end = NULL;
+ unsigned char number_c_string[64];
+ unsigned char decimal_point = get_decimal_point(); // NOLINT
+ size_t i = 0;
+
+ if ((input_buffer == NULL) || (input_buffer->content == NULL))
+ {
+ return false; // NOLINT
+ }
+
+ /* copy the number into a temporary buffer and replace '.' with the decimal point
+ * of the current locale (for strtod)
+ * This also takes care of '\0' not necessarily being available for marking the end of the input */
+ for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++)
+ {
+ switch (buffer_at_offset(input_buffer)[i])
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ case '+':
+ case '-':
+ case 'e':
+ case 'E':
+ number_c_string[i] = buffer_at_offset(input_buffer)[i];
+ break;
+
+ case '.':
+ number_c_string[i] = decimal_point;
+ break;
+
+ default:
+ goto loop_end;
+ }
+ }
+loop_end:
+ number_c_string[i] = '\0';
+
+ number = strtod((const char*)number_c_string, (char**)&after_end);
+ if (number_c_string == after_end)
+ {
+ return false; /* parse_error */ // NOLINT
+ }
+
+ item->valuedouble = number;
+
+ /* use saturation in case of overflow */
+ if (number >= INT_MAX)
+ { // NOLINT
+ item->valueint = INT_MAX;
+ }
+ else if (number <= (double)INT_MIN)
+ {
+ item->valueint = INT_MIN;
+ }
+ else
+ {
+ item->valueint = (int)number;
+ }
+
+ item->type = cJSON_Number; // NOLINT
+
+ input_buffer->offset += (size_t)(after_end - number_c_string);
+ return true; // NOLINT
+}
+
+/* don't ask me, but the original cJSON_SetNumberValue returns an integer or double */
+CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) // NOLINT
+{
+ if (number >= INT_MAX)
+ {
+ object->valueint = INT_MAX;
+ }
+ else if (number <= (double)INT_MIN)
+ {
+ object->valueint = INT_MIN;
+ }
+ else
+ {
+ object->valueint = (int)number;
+ }
+
+ return object->valuedouble = number;
+}
+
+CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring)
+{
+ char *copy = NULL;
+ /* if object's type is not cJSON_String or is cJSON_IsReference, it should not set valuestring */
+ if (!(object->type & cJSON_String) || (object->type & cJSON_IsReference))
+ {
+ return NULL;
+ }
+ if (strlen(valuestring) <= strlen(object->valuestring))
+ {
+ size_t value_length = strlen(valuestring) + sizeof("");
+ memcpy(object->valuestring, valuestring, value_length);
+ return object->valuestring;
+ }
+ copy = (char*) cJSON_strdup((const unsigned char*)valuestring, &global_hooks);
+ if (copy == NULL)
+ {
+ return NULL;
+ }
+ if (object->valuestring != NULL)
+ {
+ cJSON_free(object->valuestring);
+ }
+ object->valuestring = copy;
+
+ return copy;
+}
+
+typedef struct
+{
+ unsigned char *buffer;
+ size_t length;
+ size_t offset;
+ size_t depth; /* current nesting depth (for formatted printing) */
+ cJSON_bool noalloc;
+ cJSON_bool format; /* is this print a formatted print */
+ internal_hooks hooks;
+} printbuffer;
+
+/* realloc printbuffer if necessary to have at least "needed" bytes more */
+static unsigned char* ensure(printbuffer * const p, size_t needed) // NOLINT
+{
+ unsigned char *newbuffer = NULL;
+ size_t newsize = 0;
+
+ if ((p == NULL) || (p->buffer == NULL))
+ {
+ return NULL;
+ }
+
+ if ((p->length > 0) && (p->offset >= p->length))
+ {
+ /* make sure that offset is valid */
+ return NULL;
+ }
+
+ if (needed > INT_MAX)
+ {
+ /* sizes bigger than INT_MAX are currently not supported */
+ return NULL;
+ }
+
+ needed += p->offset + 1;
+ if (needed <= p->length)
+ {
+ return p->buffer + p->offset;
+ }
+
+ if (p->noalloc) {
+ return NULL;
+ }
+
+ /* calculate new buffer size */
+ if (needed > (INT_MAX / 2))
+ {
+ /* overflow of int, use INT_MAX if possible */
+ if (needed <= INT_MAX)
+ {
+ newsize = INT_MAX;
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+ else
+ {
+ newsize = needed * 2;
+ }
+
+ if (p->hooks.reallocate != NULL)
+ {
+ /* reallocate with realloc if available */
+ newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize);
+ if (newbuffer == NULL)
+ {
+ p->hooks.deallocate(p->buffer);
+ p->length = 0;
+ p->buffer = NULL;
+
+ return NULL;
+ }
+ }
+ else
+ {
+ /* otherwise reallocate manually */
+ newbuffer = (unsigned char*)p->hooks.allocate(newsize);
+ if (!newbuffer)
+ {
+ p->hooks.deallocate(p->buffer);
+ p->length = 0;
+ p->buffer = NULL;
+
+ return NULL;
+ }
+
+ memcpy(newbuffer, p->buffer, p->offset + 1);
+ p->hooks.deallocate(p->buffer);
+ }
+ p->length = newsize;
+ p->buffer = newbuffer;
+
+ return newbuffer + p->offset;
+}
+
+/* calculate the new length of the string in a printbuffer and update the offset */
+static void update_offset(printbuffer * const buffer) // NOLINT
+{
+ const unsigned char *buffer_pointer = NULL;
+ if ((buffer == NULL) || (buffer->buffer == NULL))
+ {
+ return;
+ }
+ buffer_pointer = buffer->buffer + buffer->offset;
+
+ buffer->offset += strlen((const char*)buffer_pointer);
+}
+
+/* securely comparison of floating-point variables */
+static cJSON_bool compare_double(double a, double b) // NOLINT
+{
+ double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b);
+ return (fabs(a - b) <= maxVal * DBL_EPSILON);
+}
+
+/* Render the number nicely from the given item into a string. */
+static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer) // NOLINT
+{
+ unsigned char *output_pointer = NULL;
+ double d = item->valuedouble;
+ int length = 0;
+ size_t i = 0;
+ unsigned char number_buffer[26] = {0}; /* temporary buffer to print the number into */
+ unsigned char decimal_point = get_decimal_point(); // NOLINT
+ double test = 0.0;
+
+ if (output_buffer == NULL)
+ {
+ return false;
+ }
+
+ /* This checks for NaN and Infinity */
+ if (isnan(d) || isinf(d))
+ {
+ length = snprintf((char*)number_buffer, sizeof(number_buffer) / sizeof(char), "null");
+ }
+ else
+ {
+ /* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */
+ length = snprintf((char*)number_buffer, sizeof(number_buffer) / sizeof(char), "%1.15g", d);
+
+ /* Check whether the original double can be recovered */
+ if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || !compare_double((double)test, d))
+ {
+ /* If not, print with 17 decimal places of precision */
+ length = snprintf((char*)number_buffer, sizeof(number_buffer) / sizeof(char), "%1.17g", d);
+ }
+ }
+
+ /* sprintf failed or buffer overrun occurred */
+ if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1)))
+ {
+ return false;
+ }
+
+ /* reserve appropriate space in the output */
+ output_pointer = ensure(output_buffer, (size_t)length + sizeof(""));
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+
+ /* copy the printed number to the output and replace locale
+ * dependent decimal point with '.' */
+ for (i = 0; i < ((size_t)length); i++)
+ {
+ if (number_buffer[i] == decimal_point)
+ {
+ output_pointer[i] = '.';
+ continue;
+ }
+
+ output_pointer[i] = number_buffer[i];
+ }
+ output_pointer[i] = '\0';
+
+ output_buffer->offset += (size_t)length;
+
+ return true;
+}
+
+/* parse 4 digit hexadecimal number */
+static unsigned parse_hex4(const unsigned char * const input)
+{
+ unsigned int h = 0;
+ size_t i = 0;
+
+ for (i = 0; i < 4; i++)
+ {
+ /* parse digit */
+ if ((input[i] >= '0') && (input[i] <= '9'))
+ {
+ h += (unsigned int) input[i] - '0';
+ }
+ else if ((input[i] >= 'A') && (input[i] <= 'F'))
+ {
+ h += (unsigned int) 10 + input[i] - 'A';
+ }
+ else if ((input[i] >= 'a') && (input[i] <= 'f'))
+ {
+ h += (unsigned int) 10 + input[i] - 'a';
+ }
+ else /* invalid */
+ {
+ return 0;
+ }
+
+ if (i < 3)
+ {
+ /* shift left to make place for the next nibble */
+ h = h << 4;
+ }
+ }
+
+ return h;
+}
+
+/* converts a UTF-16 literal to UTF-8
+* A literal can be one or two sequences of the form \uXXXX */
+static unsigned char utf16_literal_to_utf8(const unsigned char * const input_pointer, const unsigned char * const input_end, unsigned char **output_pointer)
+{
+ long unsigned int codepoint = 0;
+ unsigned int first_code = 0;
+ const unsigned char *first_sequence = input_pointer;
+ unsigned char utf8_length = 0;
+ unsigned char utf8_position = 0;
+ unsigned char sequence_length = 0;
+ unsigned char first_byte_mark = 0;
+
+ if ((input_end - first_sequence) < 6)
+ {
+ /* input ends unexpectedly */
+ goto fail;
+ }
+
+ /* get the first utf16 sequence */
+ first_code = parse_hex4(first_sequence + 2);
+
+ /* check that the code is valid */
+ if (((first_code >= 0xDC00) && (first_code <= 0xDFFF)))
+ {
+ goto fail;
+ }
+
+ /* UTF16 surrogate pair */
+ if ((first_code >= 0xD800) && (first_code <= 0xDBFF))
+ {
+ const unsigned char *second_sequence = first_sequence + 6;
+ unsigned int second_code = 0;
+ sequence_length = 12; /* \uXXXX\uXXXX */
+
+ if ((input_end - second_sequence) < 6)
+ {
+ /* input ends unexpectedly */
+ goto fail;
+ }
+
+ if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u'))
+ {
+ /* missing second half of the surrogate pair */
+ goto fail;
+ }
+
+ /* get the second utf16 sequence */
+ second_code = parse_hex4(second_sequence + 2);
+ /* check that the code is valid */
+ if ((second_code < 0xDC00) || (second_code > 0xDFFF))
+ {
+ /* invalid second half of the surrogate pair */
+ goto fail;
+ }
+
+
+ /* calculate the unicode codepoint from the surrogate pair */
+ codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF));
+ }
+ else
+ {
+ sequence_length = 6; /* \uXXXX */
+ codepoint = first_code;
+ }
+
+ /* encode as UTF-8
+ * takes at maximum 4 bytes to encode:
+ * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
+ if (codepoint < 0x80)
+ {
+ /* normal ascii, encoding 0xxxxxxx */
+ utf8_length = 1;
+ }
+ else if (codepoint < 0x800)
+ {
+ /* two bytes, encoding 110xxxxx 10xxxxxx */
+ utf8_length = 2;
+ first_byte_mark = 0xC0; /* 11000000 */
+ }
+ else if (codepoint < 0x10000)
+ {
+ /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */
+ utf8_length = 3;
+ first_byte_mark = 0xE0; /* 11100000 */
+ }
+ else if (codepoint <= 0x10FFFF)
+ {
+ /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx */
+ utf8_length = 4;
+ first_byte_mark = 0xF0; /* 11110000 */
+ }
+ else
+ {
+ /* invalid unicode codepoint */
+ goto fail;
+ }
+
+ /* encode as utf8 */
+ for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--)
+ {
+ /* 10xxxxxx */
+ (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF);
+ codepoint >>= 6;
+ }
+ /* encode first byte */
+ if (utf8_length > 1)
+ {
+ (*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF);
+ }
+ else
+ {
+ (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F);
+ }
+
+ *output_pointer += utf8_length;
+
+ return sequence_length;
+
+fail:
+ return 0;
+}
+
+/* Parse the input text into an unescaped cinput, and populate item. */
+static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer) // NOLINT
+{
+ const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1;
+ const unsigned char *input_end = buffer_at_offset(input_buffer) + 1;
+ unsigned char *output_pointer = NULL;
+ unsigned char *output = NULL;
+
+ /* not a string */
+ if (buffer_at_offset(input_buffer)[0] != '\"')
+ {
+ goto fail;
+ }
+
+ {
+ /* calculate approximate size of the output (overestimate) */
+ size_t allocation_length = 0;
+ size_t skipped_bytes = 0;
+ while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"'))
+ {
+ /* is escape sequence */
+ if (input_end[0] == '\\')
+ {
+ if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length)
+ {
+ /* prevent buffer overflow when last input character is a backslash */
+ goto fail;
+ }
+ skipped_bytes++;
+ input_end++;
+ }
+ input_end++;
+ }
+ if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"'))
+ {
+ goto fail; /* string ended unexpectedly */
+ }
+
+ /* This is at most how much we need for the output */
+ allocation_length = (size_t) (input_end - buffer_at_offset(input_buffer)) - skipped_bytes;
+ output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof(""));
+ if (output == NULL)
+ {
+ goto fail; /* allocation failure */
+ }
+ }
+
+ output_pointer = output;
+ /* loop through the string literal */
+ while (input_pointer < input_end)
+ {
+ if (*input_pointer != '\\')
+ {
+ *output_pointer++ = *input_pointer++;
+ }
+ /* escape sequence */
+ else
+ {
+ unsigned char sequence_length = 2;
+ if ((input_end - input_pointer) < 1)
+ {
+ goto fail;
+ }
+
+ switch (input_pointer[1])
+ {
+ case 'b':
+ *output_pointer++ = '\b';
+ break;
+ case 'f':
+ *output_pointer++ = '\f';
+ break;
+ case 'n':
+ *output_pointer++ = '\n';
+ break;
+ case 'r':
+ *output_pointer++ = '\r';
+ break;
+ case 't':
+ *output_pointer++ = '\t';
+ break;
+ case '\"':
+ case '\\':
+ case '/':
+ *output_pointer++ = input_pointer[1];
+ break;
+
+ /* UTF-16 literal */
+ case 'u':
+ sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer);
+ if (sequence_length == 0)
+ {
+ /* failed to convert UTF16-literal to UTF-8 */
+ goto fail;
+ }
+ break;
+
+ default:
+ goto fail;
+ }
+ input_pointer += sequence_length;
+ }
+ }
+
+ /* zero terminate the output */
+ *output_pointer = '\0';
+
+ item->type = cJSON_String;
+ item->valuestring = (char*)output;
+
+ input_buffer->offset = (size_t) (input_end - input_buffer->content);
+ input_buffer->offset++;
+
+ return true;
+
+fail:
+ if (output != NULL)
+ {
+ input_buffer->hooks.deallocate(output);
+ }
+
+ if (input_pointer != NULL)
+ {
+ input_buffer->offset = (size_t)(input_pointer - input_buffer->content);
+ }
+
+ return false;
+}
+
+/* Render the cstring provided to an escaped version that can be printed. */
+static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer) // NOLINT
+{
+ const unsigned char *input_pointer = NULL;
+ unsigned char *output = NULL;
+ unsigned char *output_pointer = NULL;
+ size_t output_length = 0;
+ /* numbers of additional characters needed for escaping */
+ size_t escape_characters = 0;
+
+ if (output_buffer == NULL)
+ {
+ return false;
+ }
+
+ /* empty string */
+ if (input == NULL)
+ {
+ output = ensure(output_buffer, sizeof("\"\""));
+ if (output == NULL)
+ {
+ return false;
+ }
+ memcpy(output, "\"\"", 3); /* NOLINT */
+
+ return true;
+ }
+
+ /* set "flag" to 1 if something needs to be escaped */
+ for (input_pointer = input; *input_pointer; input_pointer++)
+ {
+ switch (*input_pointer)
+ {
+ case '\"':
+ case '\\':
+ case '\b':
+ case '\f':
+ case '\n':
+ case '\r':
+ case '\t':
+ /* one character escape sequence */
+ escape_characters++;
+ break;
+ default:
+ if (*input_pointer < 32)
+ {
+ /* UTF-16 escape sequence uXXXX */
+ escape_characters += 5;
+ }
+ break;
+ }
+ }
+ output_length = (size_t)(input_pointer - input) + escape_characters;
+
+ output = ensure(output_buffer, output_length + sizeof("\"\""));
+ if (output == NULL)
+ {
+ return false;
+ }
+
+ /* no characters have to be escaped */
+ if (escape_characters == 0)
+ {
+ output[0] = '\"';
+ memcpy(output + 1, input, output_length);
+ output[output_length + 1] = '\"';
+ output[output_length + 2] = '\0';
+
+ return true;
+ }
+
+ output[0] = '\"';
+ output_pointer = output + 1;
+ /* copy the string */
+ for (input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++)
+ {
+ if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\'))
+ {
+ /* normal character, copy */
+ *output_pointer = *input_pointer;
+ }
+ else
+ {
+ /* character needs to be escaped */
+ *output_pointer++ = '\\';
+ switch (*input_pointer)
+ {
+ case '\\':
+ *output_pointer = '\\';
+ break;
+ case '\"':
+ *output_pointer = '\"';
+ break;
+ case '\b':
+ *output_pointer = 'b';
+ break;
+ case '\f':
+ *output_pointer = 'f';
+ break;
+ case '\n':
+ *output_pointer = 'n';
+ break;
+ case '\r':
+ *output_pointer = 'r';
+ break;
+ case '\t':
+ *output_pointer = 't';
+ break;
+ default:
+ /* escape and print as unicode codepoint */
+ snprintf((char*)output_pointer, 6 * sizeof(char), "u%04x", *input_pointer);
+ output_pointer += 4;
+ break;
+ }
+ }
+ }
+ output[output_length + 1] = '\"';
+ output[output_length + 2] = '\0';
+
+ return true;
+}
+
+/* Invoke print_string_ptr (which is useful) on an item. */
+static cJSON_bool print_string(const cJSON * const item, printbuffer * const p)
+{
+ return print_string_ptr((unsigned char*)item->valuestring, p);
+}
+
+/* Predeclare these prototypes. */
+static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer);
+static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer);
+static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer);
+static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer);
+static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer);
+static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer);
+
+/* Utility to jump whitespace and cr/lf */
+static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer) // NOLINT
+{
+ if ((buffer == NULL) || (buffer->content == NULL))
+ {
+ return NULL;
+ }
+
+ if (cannot_access_at_index(buffer, 0))
+ {
+ return buffer;
+ }
+
+ while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32))
+ {
+ buffer->offset++;
+ }
+
+ if (buffer->offset == buffer->length)
+ {
+ buffer->offset--;
+ }
+
+ return buffer;
+}
+
+/* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer */
+static parse_buffer *skip_utf8_bom(parse_buffer * const buffer) // NOLINT
+{
+ if ((buffer == NULL) || (buffer->content == NULL) || (buffer->offset != 0))
+ {
+ return NULL;
+ }
+
+ if (can_access_at_index(buffer, 4) && (strncmp((const char*)buffer_at_offset(buffer), "\xEF\xBB\xBF", 3) == 0))
+ {
+ buffer->offset += 3;
+ }
+
+ return buffer;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated)
+{
+ size_t buffer_length;
+
+ if (NULL == value)
+ {
+ return NULL;
+ }
+
+ /* Adding null character size due to require_null_terminated. */
+ buffer_length = strlen(value) + sizeof("");
+
+ return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end, require_null_terminated);
+}
+
+/* Parse an object - create a new root, and populate. */
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated)
+{
+ parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } };
+ cJSON *item = NULL;
+
+ /* reset error position */
+ global_error.json = NULL;
+ global_error.position = 0;
+
+ if (value == NULL || 0 == buffer_length)
+ {
+ goto fail;
+ }
+
+ buffer.content = (const unsigned char*)value;
+ buffer.length = buffer_length;
+ buffer.offset = 0;
+ buffer.hooks = global_hooks;
+
+ item = cJSON_New_Item(&global_hooks);
+ if (item == NULL) /* memory fail */
+ {
+ goto fail;
+ }
+
+ if (!parse_value(item, buffer_skip_whitespace(skip_utf8_bom(&buffer))))
+ {
+ /* parse failure. ep is set. */
+ goto fail;
+ }
+
+ /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */
+ if (require_null_terminated)
+ {
+ buffer_skip_whitespace(&buffer);
+ if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0')
+ {
+ goto fail;
+ }
+ }
+ if (return_parse_end)
+ {
+ *return_parse_end = (const char*)buffer_at_offset(&buffer);
+ }
+
+ return item;
+
+fail:
+ if (item != NULL)
+ {
+ cJSON_Delete(item);
+ }
+
+ if (value != NULL)
+ {
+ error local_error;
+ local_error.json = (const unsigned char*)value;
+ local_error.position = 0;
+
+ if (buffer.offset < buffer.length)
+ {
+ local_error.position = buffer.offset;
+ }
+ else if (buffer.length > 0)
+ {
+ local_error.position = buffer.length - 1;
+ }
+
+ if (return_parse_end != NULL)
+ {
+ *return_parse_end = (const char*)local_error.json + local_error.position;
+ }
+
+ global_error = local_error;
+ }
+
+ return NULL;
+}
+
+/* Default options for cJSON_Parse */
+CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value)
+{
+ return cJSON_ParseWithOpts(value, 0, 0);
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length)
+{
+ return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0);
+}
+
+#define cjson_min(a, b) (((a) < (b)) ? (a) : (b))
+
+static unsigned char *print(const cJSON * const item, cJSON_bool format, const internal_hooks * const hooks)
+{
+ static const size_t default_buffer_size = 256;
+ printbuffer buffer[1];
+ unsigned char *printed = NULL;
+
+ memset(buffer, 0, sizeof(buffer));
+
+ /* create buffer */
+ buffer->buffer = (unsigned char*) hooks->allocate(default_buffer_size);
+ buffer->length = default_buffer_size;
+ buffer->format = format;
+ buffer->hooks = *hooks;
+ if (buffer->buffer == NULL)
+ {
+ goto fail;
+ }
+
+ /* print the value */
+ if (!print_value(item, buffer))
+ {
+ goto fail;
+ }
+ update_offset(buffer);
+
+ /* check if reallocate is available */
+ if (hooks->reallocate != NULL)
+ {
+ printed = (unsigned char*) hooks->reallocate(buffer->buffer, buffer->offset + 1);
+ if (printed == NULL) {
+ goto fail;
+ }
+ buffer->buffer = NULL;
+ }
+ else /* otherwise copy the JSON over to a new buffer */
+ {
+ printed = (unsigned char*) hooks->allocate(buffer->offset + 1);
+ if (printed == NULL)
+ {
+ goto fail;
+ }
+ memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1));
+ printed[buffer->offset] = '\0'; /* just to be sure */
+
+ /* free the buffer */
+ hooks->deallocate(buffer->buffer);
+ }
+
+ return printed;
+
+fail:
+ if (buffer->buffer != NULL)
+ {
+ hooks->deallocate(buffer->buffer);
+ }
+
+ if (printed != NULL)
+ {
+ hooks->deallocate(printed);
+ }
+
+ return NULL;
+}
+
+/* Render a cJSON item/entity/structure to text. */
+CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item)
+{
+ return (char*)print(item, true, &global_hooks);
+}
+
+CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item)
+{
+ return (char*)print(item, false, &global_hooks);
+}
+
+CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt)
+{
+ printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } };
+
+ if (prebuffer < 0)
+ {
+ return NULL;
+ }
+
+ p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer);
+ if (!p.buffer)
+ {
+ return NULL;
+ }
+
+ p.length = (size_t)prebuffer;
+ p.offset = 0;
+ p.noalloc = false;
+ p.format = fmt;
+ p.hooks = global_hooks;
+
+ if (!print_value(item, &p))
+ {
+ global_hooks.deallocate(p.buffer);
+ return NULL;
+ }
+
+ return (char*)p.buffer;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format)
+{
+ printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } };
+
+ if ((length < 0) || (buffer == NULL))
+ {
+ return false;
+ }
+
+ p.buffer = (unsigned char*)buffer;
+ p.length = (size_t)length;
+ p.offset = 0;
+ p.noalloc = true;
+ p.format = format;
+ p.hooks = global_hooks;
+
+ return print_value(item, &p);
+}
+
+/* Parser core - when encountering text, process appropriately. */
+/* Dispatches on the first unconsumed character(s) of input_buffer and fills
+ * `item` with the parsed value, advancing input_buffer->offset past it.
+ * Returns false on NULL input or when nothing at the current offset looks
+ * like a JSON value. */
+static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer)
+{
+    if ((input_buffer == NULL) || (input_buffer->content == NULL))
+    {
+        return false; /* no input */
+    }
+
+    /* parse the different types of values */
+    /* null */
+    if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0))
+    {
+        item->type = cJSON_NULL;
+        input_buffer->offset += 4;
+        return true;
+    }
+    /* false */
+    if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0))
+    {
+        item->type = cJSON_False;
+        input_buffer->offset += 5;
+        return true;
+    }
+    /* true */
+    if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0))
+    {
+        item->type = cJSON_True;
+        /* NOTE(review): only the "true" literal sets valueint here; the
+         * false/null branches above leave it untouched (matches upstream) */
+        item->valueint = 1;
+        input_buffer->offset += 4;
+        return true;
+    }
+    /* string */
+    if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"'))
+    {
+        return parse_string(item, input_buffer);
+    }
+    /* number: leading '-' or a digit */
+    if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') && (buffer_at_offset(input_buffer)[0] <= '9'))))
+    {
+        return parse_number(item, input_buffer);
+    }
+    /* array */
+    if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '['))
+    {
+        return parse_array(item, input_buffer);
+    }
+    /* object */
+    if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{'))
+    {
+        return parse_object(item, input_buffer);
+    }
+
+    return false;
+}
+
+/* Render a value to text. */
+/* Dispatches on (item->type & 0xFF) and appends the rendered text to
+ * output_buffer. The literal branches copy the trailing NUL as well
+ * (hence sizes 5/6/5), so the buffer stays NUL-terminated. Returns
+ * false on NULL arguments, unknown type, or buffer-growth failure. */
+static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer)
+{
+    unsigned char *output = NULL;
+
+    if ((item == NULL) || (output_buffer == NULL))
+    {
+        return false;
+    }
+
+    switch ((item->type) & 0xFF)
+    {
+        case cJSON_NULL:
+            output = ensure(output_buffer, 5);
+            if (output == NULL)
+            {
+                return false;
+            }
+            memcpy(output, "null", 5); /* NOLINT */
+            return true;
+
+        case cJSON_False:
+            output = ensure(output_buffer, 6);
+            if (output == NULL)
+            {
+                return false;
+            }
+            memcpy(output, "false", 6); /* NOLINT */
+            return true;
+
+        case cJSON_True:
+            output = ensure(output_buffer, 5);
+            if (output == NULL)
+            {
+                return false;
+            }
+            memcpy(output, "true", 5); /* NOLINT */
+            return true;
+
+        case cJSON_Number:
+            return print_number(item, output_buffer);
+
+        case cJSON_Raw:
+        {
+            /* raw JSON: copied verbatim, no escaping or validation */
+            size_t raw_length = 0;
+            if (item->valuestring == NULL)
+            {
+                return false;
+            }
+
+            /* sizeof("") == 1 accounts for the NUL terminator */
+            raw_length = strlen(item->valuestring) + sizeof("");
+            output = ensure(output_buffer, raw_length);
+            if (output == NULL)
+            {
+                return false;
+            }
+            memcpy(output, item->valuestring, raw_length);
+            return true;
+        }
+
+        case cJSON_String:
+            return print_string(item, output_buffer);
+
+        case cJSON_Array:
+            return print_array(item, output_buffer);
+
+        case cJSON_Object:
+            return print_object(item, output_buffer);
+
+        default:
+            return false;
+    }
+}
+
+/* Build an array from input text. */
+/* Parses "[...]" at the current offset into a linked list of children.
+ * Depth is bounded by CJSON_NESTING_LIMIT to prevent stack exhaustion
+ * from deeply nested input. On success, head->prev points at the last
+ * element (used elsewhere for O(1) append). On failure, any partially
+ * built list is freed. */
+static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer)
+{
+    cJSON *head = NULL; /* head of the linked list */
+    cJSON *current_item = NULL;
+
+    if (input_buffer->depth >= CJSON_NESTING_LIMIT)
+    {
+        return false; /* too deeply nested */
+    }
+    input_buffer->depth++;
+
+    if (buffer_at_offset(input_buffer)[0] != '[')
+    {
+        /* not an array */
+        goto fail;
+    }
+
+    input_buffer->offset++;
+    buffer_skip_whitespace(input_buffer);
+    if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']'))
+    {
+        /* empty array */
+        goto success;
+    }
+
+    /* check if we skipped to the end of the buffer */
+    if (cannot_access_at_index(input_buffer, 0))
+    {
+        input_buffer->offset--;
+        goto fail;
+    }
+
+    /* step back to character in front of the first element */
+    input_buffer->offset--;
+    /* loop through the comma separated array elements */
+    do
+    {
+        /* allocate next item */
+        cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
+        if (new_item == NULL)
+        {
+            goto fail; /* allocation failure */
+        }
+
+        /* attach next item to list */
+        if (head == NULL)
+        {
+            /* start the linked list */
+            current_item = head = new_item;
+        }
+        else
+        {
+            /* add to the end and advance */
+            current_item->next = new_item;
+            new_item->prev = current_item;
+            current_item = new_item;
+        }
+
+        /* parse next value (skips the ',' or '[' consumed above) */
+        input_buffer->offset++;
+        buffer_skip_whitespace(input_buffer);
+        if (!parse_value(current_item, input_buffer))
+        {
+            goto fail; /* failed to parse value */
+        }
+        buffer_skip_whitespace(input_buffer);
+    }
+    while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ','));
+
+    if (cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']')
+    {
+        goto fail; /* expected end of array */
+    }
+
+success:
+    input_buffer->depth--;
+
+    /* head->prev caches the last element for O(1) appends later */
+    if (head != NULL) {
+        head->prev = current_item;
+    }
+
+    item->type = cJSON_Array;
+    item->child = head;
+
+    /* consume the closing ']' */
+    input_buffer->offset++;
+
+    return true;
+
+fail:
+    if (head != NULL)
+    {
+        cJSON_Delete(head);
+    }
+
+    return false;
+}
+
+/* Render an array to text */
+/* Writes '[', each child via print_value separated by ',' (", " when
+ * formatted), then ']'. depth is incremented around the children so
+ * nested printers can indent. Returns false on any growth/print failure. */
+static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer)
+{
+    unsigned char *output_pointer = NULL;
+    size_t length = 0;
+    cJSON *current_element = item->child;
+
+    if (output_buffer == NULL)
+    {
+        return false;
+    }
+
+    /* Compose the output array. */
+    /* opening square bracket */
+    output_pointer = ensure(output_buffer, 1);
+    if (output_pointer == NULL)
+    {
+        return false;
+    }
+
+    *output_pointer = '[';
+    output_buffer->offset++;
+    output_buffer->depth++;
+
+    while (current_element != NULL)
+    {
+        if (!print_value(current_element, output_buffer))
+        {
+            return false;
+        }
+        update_offset(output_buffer);
+        if (current_element->next)
+        {
+            /* separator: "," or ", " when formatted; +1 for the NUL */
+            length = (size_t) (output_buffer->format ? 2 : 1);
+            output_pointer = ensure(output_buffer, length + 1);
+            if (output_pointer == NULL)
+            {
+                return false;
+            }
+            *output_pointer++ = ',';
+            if(output_buffer->format)
+            {
+                *output_pointer++ = ' ';
+            }
+            *output_pointer = '\0';
+            output_buffer->offset += length;
+        }
+        current_element = current_element->next;
+    }
+
+    output_pointer = ensure(output_buffer, 2);
+    if (output_pointer == NULL)
+    {
+        return false;
+    }
+    *output_pointer++ = ']';
+    *output_pointer = '\0';
+    output_buffer->depth--;
+
+    return true;
+}
+
+/* Build an object from the text. */
+/* Parses "{...}" at the current offset into a linked list of children,
+ * each child holding its key in ->string. Depth is bounded by
+ * CJSON_NESTING_LIMIT. On success, head->prev points at the last child
+ * (O(1) append cache). On failure, the partial list is freed. */
+static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer)
+{
+    cJSON *head = NULL; /* linked list head */
+    cJSON *current_item = NULL;
+
+    if (input_buffer->depth >= CJSON_NESTING_LIMIT)
+    {
+        return false; /* too deeply nested */
+    }
+    input_buffer->depth++;
+
+    if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{'))
+    {
+        goto fail; /* not an object */
+    }
+
+    input_buffer->offset++;
+    buffer_skip_whitespace(input_buffer);
+    if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}'))
+    {
+        goto success; /* empty object */
+    }
+
+    /* check if we skipped to the end of the buffer */
+    if (cannot_access_at_index(input_buffer, 0))
+    {
+        input_buffer->offset--;
+        goto fail;
+    }
+
+    /* step back to character in front of the first element */
+    input_buffer->offset--;
+    /* loop through the comma separated array elements */
+    do
+    {
+        /* allocate next item */
+        cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
+        if (new_item == NULL)
+        {
+            goto fail; /* allocation failure */
+        }
+
+        /* attach next item to list */
+        if (head == NULL)
+        {
+            /* start the linked list */
+            current_item = head = new_item;
+        }
+        else
+        {
+            /* add to the end and advance */
+            current_item->next = new_item;
+            new_item->prev = current_item;
+            current_item = new_item;
+        }
+
+        /* parse the name of the child */
+        input_buffer->offset++;
+        buffer_skip_whitespace(input_buffer);
+        if (!parse_string(current_item, input_buffer))
+        {
+            goto fail; /* failed to parse name */
+        }
+        buffer_skip_whitespace(input_buffer);
+
+        /* swap valuestring and string, because we parsed the name */
+        current_item->string = current_item->valuestring;
+        current_item->valuestring = NULL;
+
+        if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':'))
+        {
+            goto fail; /* invalid object */
+        }
+
+        /* parse the value */
+        input_buffer->offset++;
+        buffer_skip_whitespace(input_buffer);
+        if (!parse_value(current_item, input_buffer))
+        {
+            goto fail; /* failed to parse value */
+        }
+        buffer_skip_whitespace(input_buffer);
+    }
+    while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ','));
+
+    if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}'))
+    {
+        goto fail; /* expected end of object */
+    }
+
+success:
+    input_buffer->depth--;
+
+    /* head->prev caches the last child for O(1) appends later */
+    if (head != NULL) {
+        head->prev = current_item;
+    }
+
+    item->type = cJSON_Object;
+    item->child = head;
+
+    /* consume the closing '}' */
+    input_buffer->offset++;
+    return true;
+
+fail:
+    if (head != NULL)
+    {
+        cJSON_Delete(head);
+    }
+
+    return false;
+}
+
+/* Render an object to text. */
+/* Writes '{', then for each child: indentation (tabs, one per depth
+ * level, formatted mode only), the key via print_string_ptr, ':', the
+ * value via print_value, and a ',' separator for all but the last child.
+ * Closes with '}' indented one level less. Returns false on failure. */
+static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer)
+{
+    unsigned char *output_pointer = NULL;
+    size_t length = 0;
+    cJSON *current_item = item->child;
+
+    if (output_buffer == NULL)
+    {
+        return false;
+    }
+
+    /* Compose the output: */
+    length = (size_t) (output_buffer->format ? 2 : 1); /* fmt: {\n */
+    output_pointer = ensure(output_buffer, length + 1);
+    if (output_pointer == NULL)
+    {
+        return false;
+    }
+
+    *output_pointer++ = '{';
+    output_buffer->depth++;
+    if (output_buffer->format)
+    {
+        *output_pointer++ = '\n';
+    }
+    output_buffer->offset += length;
+
+    while (current_item)
+    {
+        /* indent with one tab per nesting level (formatted mode only) */
+        if (output_buffer->format)
+        {
+            size_t i;
+            output_pointer = ensure(output_buffer, output_buffer->depth);
+            if (output_pointer == NULL)
+            {
+                return false;
+            }
+            for (i = 0; i < output_buffer->depth; i++)
+            {
+                *output_pointer++ = '\t';
+            }
+            output_buffer->offset += output_buffer->depth;
+        }
+
+        /* print key */
+        if (!print_string_ptr((unsigned char*)current_item->string, output_buffer))
+        {
+            return false;
+        }
+        update_offset(output_buffer);
+
+        /* ":" or ":\t" when formatted */
+        length = (size_t) (output_buffer->format ? 2 : 1);
+        output_pointer = ensure(output_buffer, length);
+        if (output_pointer == NULL)
+        {
+            return false;
+        }
+        *output_pointer++ = ':';
+        if (output_buffer->format)
+        {
+            *output_pointer++ = '\t';
+        }
+        output_buffer->offset += length;
+
+        /* print value */
+        if (!print_value(current_item, output_buffer))
+        {
+            return false;
+        }
+        update_offset(output_buffer);
+
+        /* print comma if not last */
+        length = ((size_t)(output_buffer->format ? 1 : 0) + (size_t)(current_item->next ? 1 : 0));
+        output_pointer = ensure(output_buffer, length + 1);
+        if (output_pointer == NULL)
+        {
+            return false;
+        }
+        if (current_item->next)
+        {
+            *output_pointer++ = ',';
+        }
+
+        if (output_buffer->format)
+        {
+            *output_pointer++ = '\n';
+        }
+        *output_pointer = '\0';
+        output_buffer->offset += length;
+
+        current_item = current_item->next;
+    }
+
+    /* closing brace, indented one level less than the children */
+    output_pointer = ensure(output_buffer, output_buffer->format ? (output_buffer->depth + 1) : 2);
+    if (output_pointer == NULL)
+    {
+        return false;
+    }
+    if (output_buffer->format)
+    {
+        size_t i;
+        for (i = 0; i < (output_buffer->depth - 1); i++)
+        {
+            *output_pointer++ = '\t';
+        }
+    }
+    *output_pointer++ = '}';
+    *output_pointer = '\0';
+    output_buffer->depth--;
+
+    return true;
+}
+
+/* Get Array size/item / object item. */
+/* Counts the direct children of `array` by walking the ->next chain.
+ * Returns 0 for NULL. O(n) in the number of children. */
+CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array)
+{
+    cJSON *child = NULL;
+    size_t size = 0;
+
+    if (array == NULL)
+    {
+        return 0;
+    }
+
+    child = array->child;
+
+    while(child != NULL)
+    {
+        size++;
+        child = child->next;
+    }
+
+    /* FIXME: Can overflow here. Cannot be fixed without breaking the API */
+
+    return (int)size;
+}
+
+/* Return the child of `array` at position `index` (0-based), or NULL if
+ * the array is NULL or the index is out of range. O(index) walk. */
+static cJSON* get_array_item(const cJSON *array, size_t index)
+{
+    cJSON *current_child = NULL;
+
+    if (array == NULL)
+    {
+        return NULL;
+    }
+
+    current_child = array->child;
+    while ((current_child != NULL) && (index > 0))
+    {
+        index--;
+        current_child = current_child->next;
+    }
+
+    return current_child;
+}
+
+/* Public wrapper: rejects negative indices before the size_t cast. */
+CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index)
+{
+    if (index < 0)
+    {
+        return NULL;
+    }
+
+    return get_array_item(array, (size_t)index);
+}
+
+/* Look up a child of `object` by key. Comparison is strcmp when
+ * case_sensitive, otherwise a case-insensitive compare. Returns NULL on
+ * NULL arguments, missing key, or a child without a key string. */
+static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive)
+{
+    cJSON *current_element = NULL;
+
+    if ((object == NULL) || (name == NULL))
+    {
+        return NULL;
+    }
+
+    current_element = object->child;
+    if (case_sensitive)
+    {
+        while ((current_element != NULL) && (current_element->string != NULL) && (strcmp(name, current_element->string) != 0))
+        {
+            current_element = current_element->next;
+        }
+    }
+    else
+    {
+        while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0))
+        {
+            current_element = current_element->next;
+        }
+    }
+
+    /* a match with no key string is treated as not found */
+    if ((current_element == NULL) || (current_element->string == NULL)) {
+        return NULL;
+    }
+
+    return current_element;
+}
+
+/* Case-insensitive lookup (historic cJSON default). */
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string)
+{
+    return get_object_item(object, string, false);
+}
+
+/* Case-sensitive lookup. */
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string)
+{
+    return get_object_item(object, string, true);
+}
+
+/* True if the (case-insensitive) key exists in `object`. */
+CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string)
+{
+    return cJSON_GetObjectItem(object, string) ? 1 : 0;
+}
+
+/* Utility for array list handling. */
+/* Link `item` directly after `prev` in a doubly-linked child list. */
+static void suffix_object(cJSON *prev, cJSON *item)
+{
+    prev->next = item;
+    item->prev = prev;
+}
+
+/* Utility for handling references. */
+/* Shallow-copy `item` into a new node flagged cJSON_IsReference so that
+ * cJSON_Delete on the reference will not free the shared child data.
+ * Returns NULL on NULL input or allocation failure. */
+static cJSON *create_reference(const cJSON *item, const internal_hooks * const hooks)
+{
+    cJSON *reference = NULL;
+    if (item == NULL)
+    {
+        return NULL;
+    }
+
+    reference = cJSON_New_Item(hooks);
+    if (reference == NULL)
+    {
+        return NULL;
+    }
+
+    /* copy all fields, then detach the bits the reference must not own */
+    memcpy(reference, item, sizeof(cJSON));
+    reference->string = NULL;
+    reference->type |= cJSON_IsReference;
+    reference->next = reference->prev = NULL;
+    return reference;
+}
+
+/* Append `item` to the end of `array`'s child list in O(1), using the
+ * invariant that array->child->prev always points at the last element.
+ * Returns false on NULL arguments or when array == item (self-insert). */
+static cJSON_bool add_item_to_array(cJSON *array, cJSON *item)
+{
+    cJSON *child = NULL;
+
+    if ((item == NULL) || (array == NULL) || (array == item))
+    {
+        return false;
+    }
+
+    child = array->child;
+    /*
+     * To find the last item in array quickly, we use prev in array
+     */
+    if (child == NULL)
+    {
+        /* list is empty, start new one; first element's prev points to itself */
+        array->child = item;
+        item->prev = item;
+        item->next = NULL;
+    }
+    else
+    {
+        /* append to the end */
+        if (child->prev)
+        {
+            suffix_object(child->prev, item);
+            array->child->prev = item;
+        }
+        /* NOTE(review): if child->prev is NULL (invariant violated) the item
+         * is silently dropped but true is still returned — matches upstream */
+    }
+
+    return true;
+}
+
+/* Add item to array/object. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item)
+{
+    return add_item_to_array(array, item);
+}
+
+/* Suppress -Wcast-qual around cast_away_const: push/pop only exists on
+ * clang and GCC > 4.5, the ignore pragma on GCC > 4.1. */
+#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))))
+    #pragma GCC diagnostic push
+#endif
+#ifdef __GNUC__
+    #if ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 1)))
+    #pragma GCC diagnostic ignored "-Wcast-qual"
+    #endif
+#endif
+/* helper function to cast away const */
+/* Only used for pointers the library promises never to write through
+ * (cJSON_StringIsConst / cJSON_IsReference paths). */
+static void* cast_away_const(const void* string)
+{
+    return (void*)string;
+}
+#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))))
+    #pragma GCC diagnostic pop
+#endif
+
+
+/* Attach `item` to `object` under key `string`. When constant_key is set,
+ * the key pointer is stored as-is (not copied) and the item is flagged
+ * cJSON_StringIsConst so it will not be freed; otherwise the key is
+ * duplicated via hooks. Any previously owned key on `item` is freed.
+ * Returns false on NULL arguments, self-insert, or key-copy failure. */
+static cJSON_bool add_item_to_object(cJSON * const object, const char * const string, cJSON * const item, const internal_hooks * const hooks, const cJSON_bool constant_key)
+{
+    char *new_key = NULL;
+    int new_type = cJSON_Invalid;
+
+    if ((object == NULL) || (string == NULL) || (item == NULL) || (object == item))
+    {
+        return false;
+    }
+
+    if (constant_key)
+    {
+        new_key = (char*)cast_away_const(string);
+        new_type = item->type | cJSON_StringIsConst;
+    }
+    else
+    {
+        new_key = (char*)cJSON_strdup((const unsigned char*)string, hooks);
+        if (new_key == NULL)
+        {
+            return false;
+        }
+
+        new_type = item->type & ~cJSON_StringIsConst;
+    }
+
+    /* free the old key unless it was a constant the item never owned */
+    if (!(item->type & cJSON_StringIsConst) && (item->string != NULL))
+    {
+        hooks->deallocate(item->string);
+    }
+
+    item->string = new_key;
+    item->type = new_type;
+
+    return add_item_to_array(object, item);
+}
+
+/* Add `item` to `object` under a copied key. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item)
+{
+    return add_item_to_object(object, string, item, &global_hooks, false);
+}
+
+/* Add an item to an object with constant string as key */
+/* The key must outlive the object; it is stored by pointer, not copied. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item)
+{
+    return add_item_to_object(object, string, item, &global_hooks, true);
+}
+
+/* Append a non-owning reference to `item` to `array`. The reference node
+ * itself is owned by `array`; the referred-to data stays with the caller. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item)
+{
+    if (array == NULL)
+    {
+        return false;
+    }
+
+    /* add_item_to_array rejects NULL, so a failed create_reference is safe */
+    return add_item_to_array(array, create_reference(item, &global_hooks));
+}
+
+/* Add a non-owning reference to `item` to `object` under a copied key. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item)
+{
+    if ((object == NULL) || (string == NULL))
+    {
+        return false;
+    }
+
+    return add_item_to_object(object, string, create_reference(item, &global_hooks), &global_hooks, false);
+}
+
+/* Convenience creators: each cJSON_Add<T>ToObject below builds a new item,
+ * attaches it to `object` under `name` (key copied), and returns the new
+ * item on success. On failure the freshly created item is deleted and
+ * NULL is returned, so no memory is leaked. */
+CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name)
+{
+    cJSON *null = cJSON_CreateNull();
+    if (add_item_to_object(object, name, null, &global_hooks, false))
+    {
+        return null;
+    }
+
+    cJSON_Delete(null);
+    return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name)
+{
+    cJSON *true_item = cJSON_CreateTrue();
+    if (add_item_to_object(object, name, true_item, &global_hooks, false))
+    {
+        return true_item;
+    }
+
+    cJSON_Delete(true_item);
+    return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name)
+{
+    cJSON *false_item = cJSON_CreateFalse();
+    if (add_item_to_object(object, name, false_item, &global_hooks, false))
+    {
+        return false_item;
+    }
+
+    cJSON_Delete(false_item);
+    return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean)
+{
+    cJSON *bool_item = cJSON_CreateBool(boolean);
+    if (add_item_to_object(object, name, bool_item, &global_hooks, false))
+    {
+        return bool_item;
+    }
+
+    cJSON_Delete(bool_item);
+    return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number)
+{
+    cJSON *number_item = cJSON_CreateNumber(number);
+    if (add_item_to_object(object, name, number_item, &global_hooks, false))
+    {
+        return number_item;
+    }
+
+    cJSON_Delete(number_item);
+    return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string)
+{
+    cJSON *string_item = cJSON_CreateString(string);
+    if (add_item_to_object(object, name, string_item, &global_hooks, false))
+    {
+        return string_item;
+    }
+
+    cJSON_Delete(string_item);
+    return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw)
+{
+    cJSON *raw_item = cJSON_CreateRaw(raw);
+    if (add_item_to_object(object, name, raw_item, &global_hooks, false))
+    {
+        return raw_item;
+    }
+
+    cJSON_Delete(raw_item);
+    return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name)
+{
+    cJSON *object_item = cJSON_CreateObject();
+    if (add_item_to_object(object, name, object_item, &global_hooks, false))
+    {
+        return object_item;
+    }
+
+    cJSON_Delete(object_item);
+    return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name)
+{
+    cJSON *array = cJSON_CreateArray();
+    if (add_item_to_object(object, name, array, &global_hooks, false))
+    {
+        return array;
+    }
+
+    cJSON_Delete(array);
+    return NULL;
+}
+
+/* Unlink `item` from `parent`'s child list without freeing it, keeping the
+ * "child->prev points at the last element" invariant intact. Returns the
+ * detached item (prev/next cleared) so the caller now owns it, or NULL on
+ * NULL arguments. */
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item)
+{
+    if ((parent == NULL) || (item == NULL))
+    {
+        return NULL;
+    }
+
+    if (item != parent->child)
+    {
+        /* not the first element */
+        item->prev->next = item->next;
+    }
+    if (item->next != NULL)
+    {
+        /* not the last element */
+        item->next->prev = item->prev;
+    }
+
+    if (item == parent->child)
+    {
+        /* first element */
+        parent->child = item->next;
+    }
+    else if (item->next == NULL)
+    {
+        /* last element: repoint the head's last-element cache */
+        parent->child->prev = item->prev;
+    }
+
+    /* make sure the detached item doesn't point anywhere anymore */
+    item->prev = NULL;
+    item->next = NULL;
+
+    return item;
+}
+
+/* Detach the item at `which` from `array`; NULL for negative or
+ * out-of-range indices. Caller takes ownership of the returned item. */
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which)
+{
+    if (which < 0)
+    {
+        return NULL;
+    }
+
+    return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which));
+}
+
+/* Detach and free the item at `which`; no-op if it doesn't exist. */
+CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which)
+{
+    cJSON_Delete(cJSON_DetachItemFromArray(array, which));
+}
+
+/* Detach by key (case-insensitive); NULL if the key isn't present. */
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string)
+{
+    cJSON *to_detach = cJSON_GetObjectItem(object, string);
+
+    return cJSON_DetachItemViaPointer(object, to_detach);
+}
+
+/* Detach by key (case-sensitive); NULL if the key isn't present. */
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string)
+{
+    cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string);
+
+    return cJSON_DetachItemViaPointer(object, to_detach);
+}
+
+/* Detach and free by key (case-insensitive). */
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string)
+{
+    cJSON_Delete(cJSON_DetachItemFromObject(object, string));
+}
+
+/* Detach and free by key (case-sensitive). */
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string)
+{
+    cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string));
+}
+
+/* Replace array/object items with new ones. */
+/* Insert `newitem` into `array` before position `which`; indices past the
+ * end degrade to a plain append. Returns false for negative indices or
+ * when the append path rejects the arguments. */
+CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem)
+{
+    cJSON *after_inserted = NULL;
+
+    if (which < 0)
+    {
+        return false;
+    }
+
+    after_inserted = get_array_item(array, (size_t)which);
+    if (after_inserted == NULL)
+    {
+        /* index beyond the end: append instead */
+        return add_item_to_array(array, newitem);
+    }
+
+    /* splice newitem in front of after_inserted */
+    newitem->next = after_inserted;
+    newitem->prev = after_inserted->prev;
+    after_inserted->prev = newitem;
+    if (after_inserted == array->child)
+    {
+        array->child = newitem;
+    }
+    else
+    {
+        newitem->prev->next = newitem;
+    }
+    return true;
+}
+
+/* Replace `item` (a direct child of `parent`) with `replacement` in place,
+ * preserving list links and the head's last-element prev cache, then free
+ * `item`. Replacing an item with itself is a successful no-op. Returns
+ * false on NULL arguments. */
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement)
+{
+    if ((parent == NULL) || (replacement == NULL) || (item == NULL))
+    {
+        return false;
+    }
+
+    if (replacement == item)
+    {
+        return true;
+    }
+
+    replacement->next = item->next;
+    replacement->prev = item->prev;
+
+    if (replacement->next != NULL)
+    {
+        replacement->next->prev = replacement;
+    }
+    if (parent->child == item)
+    {
+        /* single-element list: prev must point at itself */
+        if (parent->child->prev == parent->child)
+        {
+            replacement->prev = replacement;
+        }
+        parent->child = replacement;
+    }
+    else
+    {   /*
+         * To find the last item in array quickly, we use prev in array.
+         * We can't modify the last item's next pointer where this item was the parent's child
+         */
+        if (replacement->prev != NULL)
+        {
+            replacement->prev->next = replacement;
+        }
+        if (replacement->next == NULL)
+        {
+            parent->child->prev = replacement;
+        }
+    }
+
+    /* unlink before deleting so cJSON_Delete can't walk the live list */
+    item->next = NULL;
+    item->prev = NULL;
+    cJSON_Delete(item);
+
+    return true;
+}
+
+/* Replace the item at index `which` in `array` with `newitem`. */
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem)
+{
+    if (which < 0)
+    {
+        return false;
+    }
+
+    return cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem);
+}
+
+/* Replace the child keyed `string` with `replacement`, first copying the
+ * key into the replacement so it keeps the same name. Lookup is case-
+ * (in)sensitive per the flag. Returns false on NULL args or missing key. */
+static cJSON_bool replace_item_in_object(cJSON *object, const char *string, cJSON *replacement, cJSON_bool case_sensitive)
+{
+    if ((replacement == NULL) || (string == NULL))
+    {
+        return false;
+    }
+
+    /* replace the name in the replacement */
+    if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL))
+    {
+        cJSON_free(replacement->string);
+    }
+    replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks);
+    replacement->type &= ~cJSON_StringIsConst;
+
+    return cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem)
+{
+    return replace_item_in_object(object, string, newitem, false);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem)
+{
+    return replace_item_in_object(object, string, newitem, true);
+}
+
+/* Create basic types: */
+/* Each creator allocates a fresh item via global_hooks and sets only its
+ * type (plus value fields where applicable); NULL on allocation failure. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if(item)
+    {
+        item->type = cJSON_NULL;
+    }
+
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if(item)
+    {
+        /* NOTE(review): valueint is left at its default here, unlike the
+         * parser's "true" branch which sets it to 1 — matches upstream */
+        item->type = cJSON_True;
+    }
+
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if(item)
+    {
+        item->type = cJSON_False;
+    }
+
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if(item)
+    {
+        item->type = boolean ? cJSON_True : cJSON_False;
+    }
+
+    return item;
+}
+
+/* Creates a number item; valueint is the saturated integer mirror of num. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if(item)
+    {
+        item->type = cJSON_Number;
+        item->valuedouble = num;
+
+        /* use saturation in case of overflow */
+        if (num >= INT_MAX)
+        {
+            item->valueint = INT_MAX;
+        }
+        else if (num <= (double)INT_MIN)
+        {
+            item->valueint = INT_MIN;
+        }
+        else
+        {
+            item->valueint = (int)num;
+        }
+    }
+
+    return item;
+}
+
+/* Create a string item owning a copy of `string`; NULL on allocation or
+ * copy failure (the half-built item is cleaned up). */
+CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if(item)
+    {
+        item->type = cJSON_String;
+        item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks);
+        if(!item->valuestring)
+        {
+            cJSON_Delete(item);
+            return NULL;
+        }
+    }
+
+    return item;
+}
+
+/* Create a string item that references (does not copy) `string`; the
+ * caller must keep `string` alive for the item's lifetime. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item != NULL)
+    {
+        item->type = cJSON_String | cJSON_IsReference;
+        item->valuestring = (char*)cast_away_const(string);
+    }
+
+    return item;
+}
+
+/* Create an object item referencing (not owning) an existing child list. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item != NULL) {
+        item->type = cJSON_Object | cJSON_IsReference;
+        item->child = (cJSON*)cast_away_const(child);
+    }
+
+    return item;
+}
+
+/* Create an array item referencing (not owning) an existing child list. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) {
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item != NULL) {
+        item->type = cJSON_Array | cJSON_IsReference;
+        item->child = (cJSON*)cast_away_const(child);
+    }
+
+    return item;
+}
+
+/* Create a raw item owning a copy of `raw` (pre-rendered JSON, printed
+ * verbatim); NULL on allocation or copy failure. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if(item)
+    {
+        item->type = cJSON_Raw;
+        item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks);
+        if(!item->valuestring)
+        {
+            cJSON_Delete(item);
+            return NULL;
+        }
+    }
+
+    return item;
+}
+
+/* Create an empty array item. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if(item)
+    {
+        item->type=cJSON_Array;
+    }
+
+    return item;
+}
+
+/* Create an empty object item. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item)
+    {
+        item->type = cJSON_Object;
+    }
+
+    return item;
+}
+
+/* Create Arrays: */
+/* Each cJSON_Create<T>Array builds an array of `count` elements from a C
+ * array. The child list is linked manually with suffix_object (not via
+ * add_item_to_array), and head->prev is patched to the last element
+ * afterwards to maintain the O(1)-append invariant. Returns NULL on
+ * negative count, NULL input, or any allocation failure (partial arrays
+ * are freed). */
+CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count)
+{
+    size_t i = 0;
+    cJSON *n = NULL;
+    cJSON *p = NULL;
+    cJSON *a = NULL;
+
+    if ((count < 0) || (numbers == NULL))
+    {
+        return NULL;
+    }
+
+    a = cJSON_CreateArray();
+
+    for(i = 0; a && (i < (size_t)count); i++)
+    {
+        n = cJSON_CreateNumber(numbers[i]);
+        if (!n)
+        {
+            cJSON_Delete(a);
+            return NULL;
+        }
+        if(!i)
+        {
+            a->child = n;
+        }
+        else
+        {
+            suffix_object(p, n);
+        }
+        p = n;
+    }
+
+    /* point head's prev at the last element (append cache) */
+    if (a && a->child) {
+        a->child->prev = n;
+    }
+
+    return a;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count)
+{
+    size_t i = 0;
+    cJSON *n = NULL;
+    cJSON *p = NULL;
+    cJSON *a = NULL;
+
+    if ((count < 0) || (numbers == NULL))
+    {
+        return NULL;
+    }
+
+    a = cJSON_CreateArray();
+
+    for(i = 0; a && (i < (size_t)count); i++)
+    {
+        /* floats are widened to double for storage */
+        n = cJSON_CreateNumber((double)numbers[i]);
+        if(!n)
+        {
+            cJSON_Delete(a);
+            return NULL;
+        }
+        if(!i)
+        {
+            a->child = n;
+        }
+        else
+        {
+            suffix_object(p, n);
+        }
+        p = n;
+    }
+
+    if (a && a->child) {
+        a->child->prev = n;
+    }
+
+    return a;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count)
+{
+    size_t i = 0;
+    cJSON *n = NULL;
+    cJSON *p = NULL;
+    cJSON *a = NULL;
+
+    if ((count < 0) || (numbers == NULL))
+    {
+        return NULL;
+    }
+
+    a = cJSON_CreateArray();
+
+    for(i = 0; a && (i < (size_t)count); i++)
+    {
+        n = cJSON_CreateNumber(numbers[i]);
+        if(!n)
+        {
+            cJSON_Delete(a);
+            return NULL;
+        }
+        if(!i)
+        {
+            a->child = n;
+        }
+        else
+        {
+            suffix_object(p, n);
+        }
+        p = n;
+    }
+
+    if (a && a->child) {
+        a->child->prev = n;
+    }
+
+    return a;
+}
+
+/* Each element string is deep-copied by cJSON_CreateString. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count)
+{
+    size_t i = 0;
+    cJSON *n = NULL;
+    cJSON *p = NULL;
+    cJSON *a = NULL;
+
+    if ((count < 0) || (strings == NULL))
+    {
+        return NULL;
+    }
+
+    a = cJSON_CreateArray();
+
+    for (i = 0; a && (i < (size_t)count); i++)
+    {
+        n = cJSON_CreateString(strings[i]);
+        if(!n)
+        {
+            cJSON_Delete(a);
+            return NULL;
+        }
+        if(!i)
+        {
+            a->child = n;
+        }
+        else
+        {
+            suffix_object(p,n);
+        }
+        p = n;
+    }
+
+    if (a && a->child) {
+        a->child->prev = n;
+    }
+
+    return a;
+}
+
+/* Duplication */
+/* Deep- (recurse=true) or shallow- (recurse=false) copy `item` into a new
+ * tree the caller owns. The IsReference flag is dropped on the copy;
+ * const keys are shared, owned keys and valuestrings are duplicated.
+ * Returns NULL on NULL input or any allocation failure (partial copies
+ * are freed). Note: only the top item's siblings are NOT copied — each
+ * child level copies its whole ->next chain. */
+CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse)
+{
+    cJSON *newitem = NULL;
+    cJSON *child = NULL;
+    cJSON *next = NULL;
+    cJSON *newchild = NULL;
+
+    /* Bail on bad ptr */
+    if (!item)
+    {
+        goto fail;
+    }
+    /* Create new item */
+    newitem = cJSON_New_Item(&global_hooks);
+    if (!newitem)
+    {
+        goto fail;
+    }
+    /* Copy over all vars */
+    newitem->type = item->type & (~cJSON_IsReference);
+    newitem->valueint = item->valueint;
+    newitem->valuedouble = item->valuedouble;
+    if (item->valuestring)
+    {
+        newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks);
+        if (!newitem->valuestring)
+        {
+            goto fail;
+        }
+    }
+    if (item->string)
+    {
+        /* const keys are shared by pointer, owned keys are copied */
+        newitem->string = (item->type&cJSON_StringIsConst) ? item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks);
+        if (!newitem->string)
+        {
+            goto fail;
+        }
+    }
+    /* If non-recursive, then we're done! */
+    if (!recurse)
+    {
+        return newitem;
+    }
+    /* Walk the ->next chain for the child. */
+    child = item->child;
+    while (child != NULL)
+    {
+        newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */
+        if (!newchild)
+        {
+            goto fail;
+        }
+        if (next != NULL)
+        {
+            /* If newitem->child already set, then crosswire ->prev and ->next and move on */
+            next->next = newchild;
+            newchild->prev = next;
+            next = newchild;
+        }
+        else
+        {
+            /* Set newitem->child and move to it */
+            newitem->child = newchild;
+            next = newchild;
+        }
+        child = child->next;
+    }
+    /* restore the head's last-element prev cache */
+    if (newitem && newitem->child)
+    {
+        newitem->child->prev = newchild;
+    }
+
+    return newitem;
+
+fail:
+    if (newitem != NULL)
+    {
+        cJSON_Delete(newitem);
+    }
+
+    return NULL;
+}
+
+/* Advance *input past a "//" comment, up to and including the newline
+ * (or to the terminating NUL if the comment ends the string). */
+static void skip_oneline_comment(char **input)
+{
+    *input += static_strlen("//");
+
+    for (; (*input)[0] != '\0'; ++(*input))
+    {
+        if ((*input)[0] == '\n') {
+            *input += static_strlen("\n");
+            return;
+        }
+    }
+}
+
+/* Advance *input past a "/ * ... * /" comment, up to and including the
+ * closing delimiter (or to the NUL if the comment is unterminated). */
+static void skip_multiline_comment(char **input)
+{
+    *input += static_strlen("/*");
+
+    for (; (*input)[0] != '\0'; ++(*input))
+    {
+        if (((*input)[0] == '*') && ((*input)[1] == '/'))
+        {
+            *input += static_strlen("*/");
+            return;
+        }
+    }
+}
+
+/* Copy a quoted JSON string verbatim from *input to *output (strings must
+ * keep their whitespace during minification), advancing both pointers past
+ * the closing quote. Escaped quotes (\") are copied as two bytes.
+ * NOTE(review): a literal backslash escape (\\) immediately followed by a
+ * closing quote is treated here as an escaped quote, so such strings are
+ * not terminated correctly — this matches the upstream cJSON behavior. */
+static void minify_string(char **input, char **output) {
+    (*output)[0] = (*input)[0];
+    *input += static_strlen("\"");
+    *output += static_strlen("\"");
+
+
+    for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) {
+        (*output)[0] = (*input)[0];
+
+        if ((*input)[0] == '\"') {
+            /* closing quote: copy it and stop */
+            (*output)[0] = '\"';
+            *input += static_strlen("\"");
+            *output += static_strlen("\"");
+            return;
+        } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) {
+            /* escaped quote: copy both bytes (loop increment covers the rest) */
+            (*output)[1] = (*input)[1];
+            *input += static_strlen("\"");
+            *output += static_strlen("\"");
+        }
+    }
+}
+
+/* Minify JSON in place: strip whitespace and // and C-style comments
+ * outside of strings; string contents are preserved verbatim. `into`
+ * trails `json`, compacting the buffer, and the result is re-terminated.
+ * Safe to call with NULL. */
+CJSON_PUBLIC(void) cJSON_Minify(char *json)
+{
+    char *into = json;
+
+    if (json == NULL)
+    {
+        return;
+    }
+
+    while (json[0] != '\0')
+    {
+        switch (json[0])
+        {
+            case ' ':
+            case '\t':
+            case '\r':
+            case '\n':
+                json++;
+                break;
+
+            case '/':
+                if (json[1] == '/')
+                {
+                    skip_oneline_comment(&json);
+                }
+                else if (json[1] == '*')
+                {
+                    skip_multiline_comment(&json);
+                } else {
+                    /* lone '/': skipped from the output (upstream behavior) */
+                    json++;
+                }
+                break;
+
+            case '\"':
+                minify_string(&json, (char**)&into);
+                break;
+
+            default:
+                into[0] = json[0];
+                json++;
+                into++;
+        }
+    }
+
+    /* and null-terminate. */
+    *into = '\0';
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Invalid;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_False;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xff) == cJSON_True;
+}
+
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & (cJSON_True | cJSON_False)) != 0;
+}
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_NULL;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Number;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_String;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Array;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Object;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Raw;
+}
+
/* Deep structural equality of two cJSON trees.
 * - Both sides must have the same (masked) type; NULL or unrecognized types
 *   compare unequal.
 * - Arrays compare element-by-element in order; objects compare by key in
 *   both directions so neither side may be a strict subset of the other.
 * - case_sensitive controls object-key matching only. */
CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive)
{
    if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF)))
    {
        return false;
    }

    /* check if type is valid */
    switch (a->type & 0xFF)
    {
        case cJSON_False:
        case cJSON_True:
        case cJSON_NULL:
        case cJSON_Number:
        case cJSON_String:
        case cJSON_Raw:
        case cJSON_Array:
        case cJSON_Object:
            break;

        default:
            return false;
    }

    /* identical objects are equal */
    if (a == b)
    {
        return true;
    }

    switch (a->type & 0xFF)
    {
        /* in these cases and equal type is enough */
        case cJSON_False:
        case cJSON_True:
        case cJSON_NULL:
            return true;

        case cJSON_Number:
            /* compare_double applies a tolerance rather than bitwise equality */
            if (compare_double(a->valuedouble, b->valuedouble))
            {
                return true;
            }
            return false;

        case cJSON_String:
        case cJSON_Raw:
            if ((a->valuestring == NULL) || (b->valuestring == NULL))
            {
                return false;
            }
            if (strcmp(a->valuestring, b->valuestring) == 0)
            {
                return true;
            }

            return false;

        case cJSON_Array:
        {
            cJSON *a_element = a->child;
            cJSON *b_element = b->child;

            for (; (a_element != NULL) && (b_element != NULL);)
            {
                if (!cJSON_Compare(a_element, b_element, case_sensitive))
                {
                    return false;
                }

                a_element = a_element->next;
                b_element = b_element->next;
            }

            /* one of the arrays is longer than the other
             * (both must be NULL here for the lengths to match) */
            if (a_element != b_element) {
                return false;
            }

            return true;
        }

        case cJSON_Object:
        {
            cJSON *a_element = NULL;
            cJSON *b_element = NULL;
            cJSON_ArrayForEach(a_element, a)
            {
                /* TODO This has O(n^2) runtime, which is horrible! */
                b_element = get_object_item(b, a_element->string, case_sensitive);
                if (b_element == NULL)
                {
                    return false;
                }

                if (!cJSON_Compare(a_element, b_element, case_sensitive))
                {
                    return false;
                }
            }

            /* doing this twice, once on a and b to prevent true comparison if a subset of b
             * TODO: Do this the proper way, this is just a fix for now */
            cJSON_ArrayForEach(b_element, b)
            {
                a_element = get_object_item(a, b_element->string, case_sensitive);
                if (a_element == NULL)
                {
                    return false;
                }

                if (!cJSON_Compare(b_element, a_element, case_sensitive))
                {
                    return false;
                }
            }

            return true;
        }

        default:
            return false;
    }
}
+
+CJSON_PUBLIC(void *) cJSON_malloc(size_t size)
+{
+ return global_hooks.allocate(size);
+}
+
+CJSON_PUBLIC(void) cJSON_free(void *object)
+{
+ global_hooks.deallocate(object);
+}
diff --git a/contrib/restricted/aws/aws-c-common/source/file.c b/contrib/restricted/aws/aws-c-common/source/file.c
new file mode 100644
index 0000000000..a64453fd23
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/file.c
@@ -0,0 +1,171 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/file.h>
+#include <aws/common/linked_list.h>
+#include <aws/common/logging.h>
+#include <aws/common/string.h>
+
+#include <errno.h>
+
/**
 * Opens a file from raw C strings by round-tripping through aws_string and
 * aws_fopen_safe().
 *
 * Returns the opened FILE* or NULL on failure -- including failure to
 * allocate the temporary strings, which previously dereferenced NULL inside
 * aws_fopen_safe().
 */
FILE *aws_fopen(const char *file_path, const char *mode) {
    struct aws_string *file_path_str = aws_string_new_from_c_str(aws_default_allocator(), file_path);
    struct aws_string *mode_str = aws_string_new_from_c_str(aws_default_allocator(), mode);

    FILE *file = NULL;
    if (file_path_str != NULL && mode_str != NULL) {
        file = aws_fopen_safe(file_path_str, mode_str);
    }

    /* aws_string_destroy tolerates NULL, so partial-allocation cleanup is safe */
    aws_string_destroy(mode_str);
    aws_string_destroy(file_path_str);

    return file;
}
+
+int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename) {
+ AWS_ZERO_STRUCT(*out_buf);
+ FILE *fp = aws_fopen(filename, "rb");
+
+ if (fp) {
+ if (fseek(fp, 0L, SEEK_END)) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno);
+ fclose(fp);
+ return aws_translate_and_raise_io_error(errno);
+ }
+
+ size_t allocation_size = (size_t)ftell(fp) + 1;
+ /* Tell the user that we allocate here and if success they're responsible for the free. */
+ if (aws_byte_buf_init(out_buf, alloc, allocation_size)) {
+ fclose(fp);
+ return AWS_OP_ERR;
+ }
+
+ /* Ensure compatibility with null-terminated APIs, but don't consider
+ * the null terminator part of the length of the payload */
+ out_buf->len = out_buf->capacity - 1;
+ out_buf->buffer[out_buf->len] = 0;
+
+ if (fseek(fp, 0L, SEEK_SET)) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno);
+ aws_byte_buf_clean_up(out_buf);
+ fclose(fp);
+ return aws_translate_and_raise_io_error(errno);
+ }
+
+ size_t read = fread(out_buf->buffer, 1, out_buf->len, fp);
+ fclose(fp);
+ if (read < out_buf->len) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to read file %s with errno %d", filename, errno);
+ aws_secure_zero(out_buf->buffer, out_buf->len);
+ aws_byte_buf_clean_up(out_buf);
+ return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
+ }
+
+ return AWS_OP_SUCCESS;
+ }
+
+ AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file %s with errno %d", filename, errno);
+
+ return aws_translate_and_raise_io_error(errno);
+}
+
/* True for either the Windows ('\\') or POSIX ('/') path separator. */
bool aws_is_any_directory_separator(char value) {
    switch (value) {
        case '/':
        case '\\':
            return true;
        default:
            return false;
    }
}
+
/* Snapshot iterator over the entries of a single directory (non-recursive).
 * All entries are collected eagerly into list_data at creation time. */
struct aws_directory_iterator {
    struct aws_linked_list list_data;       /* list of directory_entry_value.node */
    struct aws_allocator *allocator;
    struct aws_linked_list_node *current_node; /* NULL when the list is empty */
};

/* One captured directory entry. entry's path cursors are re-pointed at the
 * owned byte buffers below so they stay valid after traversal completes. */
struct directory_entry_value {
    struct aws_directory_entry entry;
    struct aws_byte_buf path;          /* owned copy backing entry.path */
    struct aws_byte_buf relative_path; /* owned copy backing entry.relative_path */
    struct aws_linked_list_node node;  /* intrusive hook into list_data */
};
+
/* aws_directory_traverse() callback: copies each entry -- including its path
 * data -- into iterator-owned storage and appends it to the iterator's list.
 * Always returns true so traversal continues to the end. */
static bool s_directory_iterator_directory_entry(const struct aws_directory_entry *entry, void *user_data) {
    struct aws_directory_iterator *iterator = user_data;
    struct directory_entry_value *value = aws_mem_calloc(iterator->allocator, 1, sizeof(struct directory_entry_value));

    /* shallow copy first, then re-point the path cursors at our own buffers */
    value->entry = *entry;
    aws_byte_buf_init_copy_from_cursor(&value->path, iterator->allocator, entry->path);
    value->entry.path = aws_byte_cursor_from_buf(&value->path);
    aws_byte_buf_init_copy_from_cursor(&value->relative_path, iterator->allocator, entry->relative_path);
    value->entry.relative_path = aws_byte_cursor_from_buf(&value->relative_path);
    aws_linked_list_push_back(&iterator->list_data, &value->node);

    return true;
}
+
+struct aws_directory_iterator *aws_directory_entry_iterator_new(
+ struct aws_allocator *allocator,
+ const struct aws_string *path) {
+ struct aws_directory_iterator *iterator = aws_mem_acquire(allocator, sizeof(struct aws_directory_iterator));
+ iterator->allocator = allocator;
+ aws_linked_list_init(&iterator->list_data);
+
+ /* the whole point of this iterator is to avoid recursion, so let's do that by passing recurse as false. */
+ if (AWS_OP_SUCCESS ==
+ aws_directory_traverse(allocator, path, false, s_directory_iterator_directory_entry, iterator)) {
+ if (!aws_linked_list_empty(&iterator->list_data)) {
+ iterator->current_node = aws_linked_list_front(&iterator->list_data);
+ }
+ return iterator;
+ }
+
+ aws_mem_release(allocator, iterator);
+ return NULL;
+}
+
+int aws_directory_entry_iterator_next(struct aws_directory_iterator *iterator) {
+ struct aws_linked_list_node *node = iterator->current_node;
+
+ if (!node || node->next == aws_linked_list_end(&iterator->list_data)) {
+ return aws_raise_error(AWS_ERROR_LIST_EMPTY);
+ }
+
+ iterator->current_node = aws_linked_list_next(node);
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_directory_entry_iterator_previous(struct aws_directory_iterator *iterator) {
+ struct aws_linked_list_node *node = iterator->current_node;
+
+ if (!node || node == aws_linked_list_begin(&iterator->list_data)) {
+ return aws_raise_error(AWS_ERROR_LIST_EMPTY);
+ }
+
+ iterator->current_node = aws_linked_list_prev(node);
+
+ return AWS_OP_SUCCESS;
+}
+
/* Frees the iterator and every entry it captured, including each entry's
 * owned path buffers. */
void aws_directory_entry_iterator_destroy(struct aws_directory_iterator *iterator) {
    while (!aws_linked_list_empty(&iterator->list_data)) {
        struct aws_linked_list_node *node = aws_linked_list_pop_front(&iterator->list_data);
        struct directory_entry_value *value = AWS_CONTAINER_OF(node, struct directory_entry_value, node);

        aws_byte_buf_clean_up(&value->path);
        aws_byte_buf_clean_up(&value->relative_path);

        aws_mem_release(iterator->allocator, value);
    }

    aws_mem_release(iterator->allocator, iterator);
}
+
+const struct aws_directory_entry *aws_directory_entry_iterator_get_value(
+ const struct aws_directory_iterator *iterator) {
+ struct aws_linked_list_node *node = iterator->current_node;
+
+ if (!iterator->current_node) {
+ return NULL;
+ }
+
+ struct directory_entry_value *value = AWS_CONTAINER_OF(node, struct directory_entry_value, node);
+ return &value->entry;
+}
diff --git a/contrib/restricted/aws/aws-c-common/source/hash_table.c b/contrib/restricted/aws/aws-c-common/source/hash_table.c
index a8125a2df1..88926e48f9 100644
--- a/contrib/restricted/aws/aws-c-common/source/hash_table.c
+++ b/contrib/restricted/aws/aws-c-common/source/hash_table.c
@@ -222,7 +222,6 @@ int aws_hash_table_init(
AWS_PRECONDITION(alloc != NULL);
AWS_PRECONDITION(hash_fn != NULL);
AWS_PRECONDITION(equals_fn != NULL);
-
struct hash_table_state template;
template.hash_fn = hash_fn;
template.equals_fn = equals_fn;
@@ -715,6 +714,13 @@ int aws_hash_table_foreach(
for (struct aws_hash_iter iter = aws_hash_iter_begin(map); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) {
int rv = callback(context, &iter.element);
+ if (rv & AWS_COMMON_HASH_TABLE_ITER_ERROR) {
+ int error = aws_last_error();
+ if (error == AWS_ERROR_SUCCESS) {
+ aws_raise_error(AWS_ERROR_UNKNOWN);
+ }
+ return AWS_OP_ERR;
+ }
if (rv & AWS_COMMON_HASH_TABLE_ITER_DELETE) {
aws_hash_iter_delete(&iter, false);
@@ -1000,7 +1006,7 @@ bool aws_hash_callback_string_eq(const void *a, const void *b) {
AWS_PRECONDITION(aws_string_is_valid(a));
AWS_PRECONDITION(aws_string_is_valid(b));
bool rval = aws_string_eq(a, b);
- AWS_RETURN_WITH_POSTCONDITION(rval, aws_c_string_is_valid(a) && aws_c_string_is_valid(b));
+ AWS_RETURN_WITH_POSTCONDITION(rval, aws_string_is_valid(a) && aws_string_is_valid(b));
}
void aws_hash_callback_string_destroy(void *a) {
diff --git a/contrib/restricted/aws/aws-c-common/source/json.c b/contrib/restricted/aws/aws-c-common/source/json.c
new file mode 100644
index 0000000000..0f1b810be5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/json.c
@@ -0,0 +1,344 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/string.h>
+
+#include <aws/common/json.h>
+
+#include <aws/common/external/cJSON.h>
+
+static struct aws_allocator *s_aws_json_module_allocator = NULL;
+static bool s_aws_json_module_initialized = false;
+
+struct aws_json_value *aws_json_value_new_string(struct aws_allocator *allocator, struct aws_byte_cursor string) {
+ struct aws_string *tmp = aws_string_new_from_cursor((struct aws_allocator *)allocator, &string);
+ void *ret_val = cJSON_CreateString(aws_string_c_str(tmp));
+ aws_string_destroy_secure(tmp);
+ return ret_val;
+}
+
/* The allocator parameters below are unused: cJSON allocates through the
 * globally-installed hooks (see aws_json_module_init). */

struct aws_json_value *aws_json_value_new_number(struct aws_allocator *allocator, double number) {
    (void)allocator;
    return (struct aws_json_value *)cJSON_CreateNumber(number);
}

struct aws_json_value *aws_json_value_new_array(struct aws_allocator *allocator) {
    (void)allocator;
    return (struct aws_json_value *)cJSON_CreateArray();
}

struct aws_json_value *aws_json_value_new_boolean(struct aws_allocator *allocator, bool boolean) {
    (void)allocator;
    return (struct aws_json_value *)cJSON_CreateBool(boolean);
}

struct aws_json_value *aws_json_value_new_null(struct aws_allocator *allocator) {
    (void)allocator;
    return (struct aws_json_value *)cJSON_CreateNull();
}

struct aws_json_value *aws_json_value_new_object(struct aws_allocator *allocator) {
    (void)allocator;
    return (struct aws_json_value *)cJSON_CreateObject();
}
+
+int aws_json_value_get_string(const struct aws_json_value *value, struct aws_byte_cursor *output) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (!cJSON_IsString(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *output = aws_byte_cursor_from_c_str(cJSON_GetStringValue(cjson));
+ return AWS_OP_SUCCESS;
+}
+
+int aws_json_value_get_number(const struct aws_json_value *value, double *output) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (!cJSON_IsNumber(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *output = cjson->valuedouble;
+ return AWS_OP_SUCCESS;
+}
+
+int aws_json_value_get_boolean(const struct aws_json_value *value, bool *output) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (!cJSON_IsBool(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *output = cjson->type == cJSON_True;
+ return AWS_OP_SUCCESS;
+}
+
+int aws_json_value_add_to_object(
+ struct aws_json_value *object,
+ struct aws_byte_cursor key,
+ struct aws_json_value *value) {
+
+ int result = AWS_OP_ERR;
+ struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key);
+
+ struct cJSON *cjson = (struct cJSON *)object;
+ if (!cJSON_IsObject(cjson)) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto done;
+ }
+
+ struct cJSON *cjson_value = (struct cJSON *)value;
+ if (cJSON_IsInvalid(cjson_value)) {
+ result = aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto done;
+ }
+ if (cJSON_HasObjectItem(cjson, aws_string_c_str(tmp))) {
+ goto done;
+ }
+
+ cJSON_AddItemToObject(cjson, aws_string_c_str(tmp), cjson_value);
+ result = AWS_OP_SUCCESS;
+
+done:
+ aws_string_destroy_secure(tmp);
+ return result;
+}
+
+struct aws_json_value *aws_json_value_get_from_object(const struct aws_json_value *object, struct aws_byte_cursor key) {
+
+ void *return_value = NULL;
+ struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key);
+
+ struct cJSON *cjson = (struct cJSON *)object;
+ if (!cJSON_IsObject(cjson)) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto done;
+ }
+ if (!cJSON_HasObjectItem(cjson, aws_string_c_str(tmp))) {
+ goto done;
+ }
+
+ return_value = (void *)cJSON_GetObjectItem(cjson, aws_string_c_str(tmp));
+
+done:
+ aws_string_destroy_secure(tmp);
+ return return_value;
+}
+
+bool aws_json_value_has_key(const struct aws_json_value *object, struct aws_byte_cursor key) {
+
+ struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key);
+ bool result = false;
+
+ struct cJSON *cjson = (struct cJSON *)object;
+ if (!cJSON_IsObject(cjson)) {
+ goto done;
+ }
+ if (!cJSON_HasObjectItem(cjson, aws_string_c_str(tmp))) {
+ goto done;
+ }
+ result = true;
+
+done:
+ aws_string_destroy_secure(tmp);
+ return result;
+}
+
/* Removes and deletes the value stored under `key` in a JSON object.
 * Returns AWS_OP_SUCCESS on removal; AWS_OP_ERR when `object` is not a JSON
 * object (raises AWS_ERROR_INVALID_ARGUMENT) or when the key is absent.
 * NOTE(review): the absent-key path returns AWS_OP_ERR without raising, so
 * aws_last_error() is stale in that case -- confirm intended contract. */
int aws_json_value_remove_from_object(struct aws_json_value *object, struct aws_byte_cursor key) {

    int result = AWS_OP_ERR;
    struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key);

    struct cJSON *cjson = (struct cJSON *)object;
    if (!cJSON_IsObject(cjson)) {
        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
        goto done;
    }
    if (!cJSON_HasObjectItem(cjson, aws_string_c_str(tmp))) {
        goto done;
    }

    cJSON_DeleteItemFromObject(cjson, aws_string_c_str(tmp));
    result = AWS_OP_SUCCESS;

done:
    aws_string_destroy_secure(tmp);
    return result;
}
+
+int aws_json_value_add_array_element(struct aws_json_value *array, const struct aws_json_value *value) {
+
+ struct cJSON *cjson = (struct cJSON *)array;
+ if (!cJSON_IsArray(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct cJSON *cjson_value = (struct cJSON *)value;
+ if (cJSON_IsInvalid(cjson_value)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ cJSON_AddItemToArray(cjson, cjson_value);
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_json_value *aws_json_get_array_element(const struct aws_json_value *array, size_t index) {
+
+ struct cJSON *cjson = (struct cJSON *)array;
+ if (!cJSON_IsArray(cjson)) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (index > (size_t)cJSON_GetArraySize(cjson)) {
+ aws_raise_error(AWS_ERROR_INVALID_INDEX);
+ return NULL;
+ }
+
+ return (void *)cJSON_GetArrayItem(cjson, (int)index);
+}
+
+size_t aws_json_get_array_size(const struct aws_json_value *array) {
+ struct cJSON *cjson = (struct cJSON *)array;
+ if (!cJSON_IsArray(cjson)) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return 0;
+ }
+ return cJSON_GetArraySize(cjson);
+}
+
+int aws_json_value_remove_array_element(struct aws_json_value *array, size_t index) {
+
+ struct cJSON *cjson = (struct cJSON *)array;
+ if (!cJSON_IsArray(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (index > (size_t)cJSON_GetArraySize(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_INDEX);
+ }
+
+ cJSON_DeleteItemFromArray(cjson, (int)index);
+ return AWS_OP_SUCCESS;
+}
+
/* Type predicates. Invalid nodes never match any concrete type; a NULL value
 * also yields false (both cJSON_IsInvalid and the cJSON_Is* checks reject
 * NULL). */

bool aws_json_value_is_string(const struct aws_json_value *value) {
    struct cJSON *cjson = (struct cJSON *)value;
    return !cJSON_IsInvalid(cjson) && cJSON_IsString(cjson);
}

bool aws_json_value_is_number(const struct aws_json_value *value) {
    struct cJSON *cjson = (struct cJSON *)value;
    return !cJSON_IsInvalid(cjson) && cJSON_IsNumber(cjson);
}

bool aws_json_value_is_array(const struct aws_json_value *value) {
    struct cJSON *cjson = (struct cJSON *)value;
    return !cJSON_IsInvalid(cjson) && cJSON_IsArray(cjson);
}

bool aws_json_value_is_boolean(const struct aws_json_value *value) {
    struct cJSON *cjson = (struct cJSON *)value;
    return !cJSON_IsInvalid(cjson) && cJSON_IsBool(cjson);
}

bool aws_json_value_is_null(const struct aws_json_value *value) {
    struct cJSON *cjson = (struct cJSON *)value;
    return !cJSON_IsInvalid(cjson) && cJSON_IsNull(cjson);
}

bool aws_json_value_is_object(const struct aws_json_value *value) {
    struct cJSON *cjson = (struct cJSON *)value;
    return !cJSON_IsInvalid(cjson) && cJSON_IsObject(cjson);
}
+
/* cJSON malloc hook: routes allocations through the module's aws_allocator. */
static void *s_aws_cJSON_alloc(size_t sz) {
    return aws_mem_acquire(s_aws_json_module_allocator, sz);
}

/* cJSON free hook: matching release for s_aws_cJSON_alloc. */
static void s_aws_cJSON_free(void *ptr) {
    aws_mem_release(s_aws_json_module_allocator, ptr);
}
+
/* Installs aws-allocator-backed hooks into cJSON. Must run before any other
 * aws_json_* call so the hooks are in place. NOTE(review): the init flag is a
 * plain bool, so concurrent first calls race -- presumably library init is
 * serialized by the caller; confirm. */
void aws_json_module_init(struct aws_allocator *allocator) {
    if (!s_aws_json_module_initialized) {
        s_aws_json_module_allocator = allocator;
        struct cJSON_Hooks allocation_hooks = {.malloc_fn = s_aws_cJSON_alloc, .free_fn = s_aws_cJSON_free};
        cJSON_InitHooks(&allocation_hooks);
        s_aws_json_module_initialized = true;
    }
}

/* Clears module state. Note: this does not restore cJSON's default hooks --
 * cJSON keeps pointing at s_aws_cJSON_alloc/free, which will see a NULL
 * allocator if used after cleanup. */
void aws_json_module_cleanup(void) {
    if (s_aws_json_module_initialized) {
        s_aws_json_module_allocator = NULL;
        s_aws_json_module_initialized = false;
    }
}
+
/* Recursively frees a JSON tree. Invalid nodes are skipped, matching the
 * original behavior. */
void aws_json_value_destroy(struct aws_json_value *value) {
    struct cJSON *cjson = (struct cJSON *)value;
    if (cJSON_IsInvalid(cjson)) {
        return;
    }
    cJSON_Delete(cjson);
}
+
/* Serializes `value` as compact (unformatted) JSON and appends the text to
 * `output`, growing the buffer as needed. Returns AWS_OP_ERR when `value` is
 * invalid (raises AWS_ERROR_INVALID_ARGUMENT) or when serialization fails.
 * NOTE(review): the cJSON_PrintUnformatted failure path returns AWS_OP_ERR
 * without raising an error code -- confirm intended contract. */
int aws_byte_buf_append_json_string(const struct aws_json_value *value, struct aws_byte_buf *output) {
    struct cJSON *cjson = (struct cJSON *)value;
    if (cJSON_IsInvalid(cjson)) {
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    /* allocated by the cJSON hooks, so released via s_aws_cJSON_free below */
    char *tmp = cJSON_PrintUnformatted(cjson);
    if (tmp == NULL) {
        return AWS_OP_ERR;
    }

    // Append the text to the byte buffer
    struct aws_byte_cursor tmp_cursor = aws_byte_cursor_from_c_str(tmp);
    int return_val = aws_byte_buf_append_dynamic_secure(output, &tmp_cursor);
    s_aws_cJSON_free(tmp); // free the char* now that we do not need it
    return return_val;
}
+
+int aws_byte_buf_append_json_string_formatted(const struct aws_json_value *value, struct aws_byte_buf *output) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (cJSON_IsInvalid(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ char *tmp = cJSON_Print(cjson);
+ if (tmp == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ // Append the text to the byte buffer
+ struct aws_byte_cursor tmp_cursor = aws_byte_cursor_from_c_str(tmp);
+ int return_val = aws_byte_buf_append_dynamic_secure(output, &tmp_cursor);
+ s_aws_cJSON_free(tmp); // free the char* now that we do not need it
+ return return_val;
+}
+
+struct aws_json_value *aws_json_value_new_from_string(struct aws_allocator *allocator, struct aws_byte_cursor string) {
+ struct aws_string *tmp = aws_string_new_from_cursor((struct aws_allocator *)allocator, &string);
+ struct cJSON *cjson = cJSON_Parse(aws_string_c_str(tmp));
+ aws_string_destroy_secure(tmp);
+ return (void *)cjson;
+}
diff --git a/contrib/restricted/aws/aws-c-common/source/log_writer.c b/contrib/restricted/aws/aws-c-common/source/log_writer.c
index 7b31e406d1..6eea2fc3c5 100644
--- a/contrib/restricted/aws/aws-c-common/source/log_writer.c
+++ b/contrib/restricted/aws/aws-c-common/source/log_writer.c
@@ -3,6 +3,7 @@
* SPDX-License-Identifier: Apache-2.0.
*/
+#include <aws/common/file.h>
#include <aws/common/log_writer.h>
#include <aws/common/string.h>
@@ -10,10 +11,6 @@
#include <errno.h>
#include <stdio.h>
-#ifdef _MSC_VER
-# pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */
-#endif /* _MSC_VER */
-
/*
* Basic log writer implementations - stdout, stderr, arbitrary file
*/
@@ -76,7 +73,7 @@ static int s_aws_file_writer_init_internal(
/* Open file if name passed in */
if (file_name_to_open != NULL) {
- impl->log_file = fopen(file_name_to_open, "a+");
+ impl->log_file = aws_fopen(file_name_to_open, "a+");
if (impl->log_file == NULL) {
aws_mem_release(allocator, impl);
return aws_translate_and_raise_io_error(errno);
diff --git a/contrib/restricted/aws/aws-c-common/source/logging.c b/contrib/restricted/aws/aws-c-common/source/logging.c
index 1b96e1cc6b..d7f0910da3 100644
--- a/contrib/restricted/aws/aws-c-common/source/logging.c
+++ b/contrib/restricted/aws/aws-c-common/source/logging.c
@@ -5,19 +5,18 @@
#include <aws/common/logging.h>
-#include <aws/common/string.h>
-
+#include <aws/common/file.h>
#include <aws/common/log_channel.h>
#include <aws/common/log_formatter.h>
#include <aws/common/log_writer.h>
#include <aws/common/mutex.h>
+#include <aws/common/string.h>
#include <errno.h>
#include <stdarg.h>
#if _MSC_VER
# pragma warning(disable : 4204) /* non-constant aggregate initializer */
-# pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */
#endif
/*
@@ -55,7 +54,11 @@ static struct aws_logger_vtable s_null_vtable = {
.clean_up = s_null_logger_clean_up,
};
-static struct aws_logger s_null_logger = {.vtable = &s_null_vtable, .allocator = NULL, .p_impl = NULL};
+static struct aws_logger s_null_logger = {
+ .vtable = &s_null_vtable,
+ .allocator = NULL,
+ .p_impl = NULL,
+};
/*
* Pipeline logger implementation
@@ -120,13 +123,22 @@ static enum aws_log_level s_aws_logger_pipeline_get_log_level(struct aws_logger
struct aws_logger_pipeline *impl = logger->p_impl;
- return impl->level;
+ return (enum aws_log_level)aws_atomic_load_int(&impl->level);
+}
+
+static int s_aws_logger_pipeline_set_log_level(struct aws_logger *logger, enum aws_log_level level) {
+ struct aws_logger_pipeline *impl = logger->p_impl;
+
+ aws_atomic_store_int(&impl->level, (size_t)level);
+
+ return AWS_OP_SUCCESS;
}
struct aws_logger_vtable g_pipeline_logger_owned_vtable = {
.get_log_level = s_aws_logger_pipeline_get_log_level,
.log = s_aws_logger_pipeline_log,
.clean_up = s_aws_logger_pipeline_owned_clean_up,
+ .set_log_level = s_aws_logger_pipeline_set_log_level,
};
int aws_logger_init_standard(
@@ -181,7 +193,7 @@ int aws_logger_init_standard(
impl->channel = channel;
impl->writer = writer;
impl->allocator = allocator;
- impl->level = options->level;
+ aws_atomic_store_int(&impl->level, (size_t)options->level);
logger->vtable = &g_pipeline_logger_owned_vtable;
logger->allocator = allocator;
@@ -224,6 +236,7 @@ static struct aws_logger_vtable s_pipeline_logger_unowned_vtable = {
.get_log_level = s_aws_logger_pipeline_get_log_level,
.log = s_aws_logger_pipeline_log,
.clean_up = s_aws_pipeline_logger_unowned_clean_up,
+ .set_log_level = s_aws_logger_pipeline_set_log_level,
};
int aws_logger_init_from_external(
@@ -244,7 +257,7 @@ int aws_logger_init_from_external(
impl->channel = channel;
impl->writer = writer;
impl->allocator = allocator;
- impl->level = level;
+ aws_atomic_store_int(&impl->level, (size_t)level);
logger->vtable = &s_pipeline_logger_unowned_vtable;
logger->allocator = allocator;
@@ -368,6 +381,17 @@ void aws_register_log_subject_info_list(struct aws_log_subject_info_list *log_su
const uint32_t min_range = log_subject_list->subject_list[0].subject_id;
const uint32_t slot_index = min_range >> AWS_LOG_SUBJECT_STRIDE_BITS;
+#if DEBUG_BUILD
+ for (uint32_t i = 0; i < log_subject_list->count; ++i) {
+ const struct aws_log_subject_info *info = &log_subject_list->subject_list[i];
+ uint32_t expected_id = min_range + i;
+ if (expected_id != info->subject_id) {
+ fprintf(stderr, "\"%s\" is at wrong index in aws_log_subject_info[]\n", info->subject_name);
+ AWS_FATAL_ASSERT(0);
+ }
+ }
+#endif /* DEBUG_BUILD */
+
if (slot_index >= AWS_PACKAGE_SLOTS) {
/* This is an NDEBUG build apparently. Kill the process rather than
* corrupting heap. */
@@ -405,7 +429,7 @@ void aws_unregister_log_subject_info_list(struct aws_log_subject_info_list *log_
* no alloc implementation
*/
struct aws_logger_noalloc {
- enum aws_log_level level;
+ struct aws_atomic_var level;
FILE *file;
bool should_close;
struct aws_mutex lock;
@@ -415,7 +439,7 @@ static enum aws_log_level s_noalloc_stderr_logger_get_log_level(struct aws_logge
(void)subject;
struct aws_logger_noalloc *impl = logger->p_impl;
- return impl->level;
+ return (enum aws_log_level)aws_atomic_load_int(&impl->level);
}
#define MAXIMUM_NO_ALLOC_LOG_LINE_SIZE 8192
@@ -464,13 +488,15 @@ static int s_noalloc_stderr_logger_log(
aws_mutex_lock(&impl->lock);
+ int write_result = AWS_OP_SUCCESS;
if (fwrite(format_buffer, 1, format_data.amount_written, impl->file) < format_data.amount_written) {
- return aws_translate_and_raise_io_error(errno);
+ aws_translate_and_raise_io_error(errno);
+ write_result = AWS_OP_ERR;
}
aws_mutex_unlock(&impl->lock);
- return AWS_OP_SUCCESS;
+ return write_result;
}
static void s_noalloc_stderr_logger_clean_up(struct aws_logger *logger) {
@@ -489,10 +515,19 @@ static void s_noalloc_stderr_logger_clean_up(struct aws_logger *logger) {
AWS_ZERO_STRUCT(*logger);
}
+int s_no_alloc_stderr_logger_set_log_level(struct aws_logger *logger, enum aws_log_level level) {
+ struct aws_logger_noalloc *impl = logger->p_impl;
+
+ aws_atomic_store_int(&impl->level, (size_t)level);
+
+ return AWS_OP_SUCCESS;
+}
+
static struct aws_logger_vtable s_noalloc_stderr_vtable = {
.get_log_level = s_noalloc_stderr_logger_get_log_level,
.log = s_noalloc_stderr_logger_log,
.clean_up = s_noalloc_stderr_logger_clean_up,
+ .set_log_level = s_no_alloc_stderr_logger_set_log_level,
};
int aws_logger_init_noalloc(
@@ -506,12 +541,13 @@ int aws_logger_init_noalloc(
return AWS_OP_ERR;
}
- impl->level = options->level;
+ aws_atomic_store_int(&impl->level, (size_t)options->level);
+
if (options->file != NULL) {
impl->file = options->file;
impl->should_close = false;
} else { /* _MSC_VER */
- impl->file = fopen(options->filename, "w");
+ impl->file = aws_fopen(options->filename, "w");
impl->should_close = true;
}
@@ -523,3 +559,15 @@ int aws_logger_init_noalloc(
return AWS_OP_SUCCESS;
}
+
+int aws_logger_set_log_level(struct aws_logger *logger, enum aws_log_level level) {
+ if (logger == NULL || logger->vtable == NULL) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (logger->vtable->set_log_level == NULL) {
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ }
+
+ return logger->vtable->set_log_level(logger, level);
+}
diff --git a/contrib/restricted/aws/aws-c-common/source/memtrace.c b/contrib/restricted/aws/aws-c-common/source/memtrace.c
index 9b776211f9..7362e07a30 100644
--- a/contrib/restricted/aws/aws-c-common/source/memtrace.c
+++ b/contrib/restricted/aws/aws-c-common/source/memtrace.c
@@ -224,7 +224,7 @@ static int s_collect_stack_trace(void *context, struct aws_hash_element *item) {
struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str(caller);
aws_byte_buf_append(&stacktrace, &cursor);
}
- free(symbols);
+ aws_mem_release(aws_default_allocator(), symbols);
/* record the resultant buffer as a string */
stack_info->trace = aws_string_new_from_array(aws_default_allocator(), stacktrace.buffer, stacktrace.len);
AWS_FATAL_ASSERT(stack_info->trace);
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/file.c b/contrib/restricted/aws/aws-c-common/source/posix/file.c
new file mode 100644
index 0000000000..7c26ade8c3
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/posix/file.c
@@ -0,0 +1,279 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/environment.h>
+#include <aws/common/file.h>
+#include <aws/common/string.h>
+#include <dirent.h>
+#include <errno.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+FILE *aws_fopen_safe(const struct aws_string *file_path, const struct aws_string *mode) {
+ return fopen(aws_string_c_str(file_path), aws_string_c_str(mode));
+}
+
+static int s_parse_and_raise_error(int errno_cpy) {
+ if (errno_cpy == 0) {
+ return AWS_OP_SUCCESS;
+ }
+
+ if (errno_cpy == ENOENT || errno_cpy == ENOTDIR) {
+ return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH);
+ }
+
+ if (errno_cpy == EMFILE || errno_cpy == ENFILE) {
+ return aws_raise_error(AWS_ERROR_MAX_FDS_EXCEEDED);
+ }
+
+ if (errno_cpy == EACCES) {
+ return aws_raise_error(AWS_ERROR_NO_PERMISSION);
+ }
+
+ if (errno_cpy == ENOTEMPTY) {
+ return aws_raise_error(AWS_ERROR_DIRECTORY_NOT_EMPTY);
+ }
+
+ return aws_raise_error(AWS_ERROR_UNKNOWN);
+}
+
+int aws_directory_create(const struct aws_string *dir_path) {
+ int mkdir_ret = mkdir(aws_string_c_str(dir_path), S_IRWXU | S_IRWXG | S_IRWXO);
+
+ /** nobody cares if it already existed. */
+ if (mkdir_ret != 0 && errno != EEXIST) {
+ return s_parse_and_raise_error(errno);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+bool aws_directory_exists(const struct aws_string *dir_path) {
+ struct stat dir_info;
+ if (lstat(aws_string_c_str(dir_path), &dir_info) == 0 && S_ISDIR(dir_info.st_mode)) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool s_delete_file_or_directory(const struct aws_directory_entry *entry, void *user_data) {
+ (void)user_data;
+
+ struct aws_allocator *allocator = aws_default_allocator();
+
+ struct aws_string *path_str = aws_string_new_from_cursor(allocator, &entry->relative_path);
+ int ret_val = AWS_OP_SUCCESS;
+
+ if (entry->file_type & AWS_FILE_TYPE_FILE) {
+ ret_val = aws_file_delete(path_str);
+ }
+
+ if (entry->file_type & AWS_FILE_TYPE_DIRECTORY) {
+ ret_val = aws_directory_delete(path_str, false);
+ }
+
+ aws_string_destroy(path_str);
+ return ret_val == AWS_OP_SUCCESS;
+}
+
+int aws_directory_delete(const struct aws_string *dir_path, bool recursive) {
+ if (!aws_directory_exists(dir_path)) {
+ return AWS_OP_SUCCESS;
+ }
+
+ int ret_val = AWS_OP_SUCCESS;
+
+ if (recursive) {
+ ret_val = aws_directory_traverse(aws_default_allocator(), dir_path, true, s_delete_file_or_directory, NULL);
+ }
+
+ if (ret_val && aws_last_error() == AWS_ERROR_FILE_INVALID_PATH) {
+ aws_reset_error();
+ return AWS_OP_SUCCESS;
+ }
+
+ if (ret_val) {
+ return AWS_OP_ERR;
+ }
+
+ int error_code = rmdir(aws_string_c_str(dir_path));
+
+ return error_code == 0 ? AWS_OP_SUCCESS : s_parse_and_raise_error(errno);
+}
+
+int aws_directory_or_file_move(const struct aws_string *from, const struct aws_string *to) {
+ int error_code = rename(aws_string_c_str(from), aws_string_c_str(to));
+
+ return error_code == 0 ? AWS_OP_SUCCESS : s_parse_and_raise_error(errno);
+}
+
+int aws_file_delete(const struct aws_string *file_path) {
+ int error_code = unlink(aws_string_c_str(file_path));
+
+ if (!error_code || errno == ENOENT) {
+ return AWS_OP_SUCCESS;
+ }
+
+ return s_parse_and_raise_error(errno);
+}
+
+int aws_directory_traverse(
+ struct aws_allocator *allocator,
+ const struct aws_string *path,
+ bool recursive,
+ aws_on_directory_entry *on_entry,
+ void *user_data) {
+ DIR *dir = opendir(aws_string_c_str(path));
+
+ if (!dir) {
+ return s_parse_and_raise_error(errno);
+ }
+
+ struct aws_byte_cursor current_path = aws_byte_cursor_from_string(path);
+ if (current_path.ptr[current_path.len - 1] == AWS_PATH_DELIM) {
+ current_path.len -= 1;
+ }
+
+ struct dirent *dirent = NULL;
+ int ret_val = AWS_ERROR_SUCCESS;
+
+ errno = 0;
+ while (!ret_val && (dirent = readdir(dir)) != NULL) {
+ /* note: dirent->name_len is only defined on the BSDs, but not linux. It's not in the
+ * required posix spec. So we use dirent->d_name as a c string here. */
+ struct aws_byte_cursor name_component = aws_byte_cursor_from_c_str(dirent->d_name);
+
+ if (aws_byte_cursor_eq_c_str(&name_component, "..") || aws_byte_cursor_eq_c_str(&name_component, ".")) {
+ continue;
+ }
+
+ struct aws_byte_buf relative_path;
+ aws_byte_buf_init_copy_from_cursor(&relative_path, allocator, current_path);
+ aws_byte_buf_append_byte_dynamic(&relative_path, AWS_PATH_DELIM);
+ aws_byte_buf_append_dynamic(&relative_path, &name_component);
+ aws_byte_buf_append_byte_dynamic(&relative_path, 0);
+ relative_path.len -= 1;
+
+ struct aws_directory_entry entry;
+ AWS_ZERO_STRUCT(entry);
+
+ struct stat dir_info;
+ if (!lstat((const char *)relative_path.buffer, &dir_info)) {
+ if (S_ISDIR(dir_info.st_mode)) {
+ entry.file_type |= AWS_FILE_TYPE_DIRECTORY;
+ }
+ if (S_ISLNK(dir_info.st_mode)) {
+ entry.file_type |= AWS_FILE_TYPE_SYM_LINK;
+ }
+ if (S_ISREG(dir_info.st_mode)) {
+ entry.file_type |= AWS_FILE_TYPE_FILE;
+ entry.file_size = dir_info.st_size;
+ }
+
+ if (!entry.file_type) {
+ AWS_ASSERT("Unknown file type encountered");
+ }
+
+ entry.relative_path = aws_byte_cursor_from_buf(&relative_path);
+ const char *full_path = realpath((const char *)relative_path.buffer, NULL);
+
+ if (full_path) {
+ entry.path = aws_byte_cursor_from_c_str(full_path);
+ }
+
+ if (recursive && entry.file_type & AWS_FILE_TYPE_DIRECTORY) {
+ struct aws_string *rel_path_str = aws_string_new_from_cursor(allocator, &entry.relative_path);
+ ret_val = aws_directory_traverse(allocator, rel_path_str, recursive, on_entry, user_data);
+ aws_string_destroy(rel_path_str);
+ }
+
+ /* post order traversal, if a node below us ended the traversal, don't call the visitor again. */
+ if (ret_val && aws_last_error() == AWS_ERROR_OPERATION_INTERUPTED) {
+ goto cleanup;
+ }
+
+ if (!on_entry(&entry, user_data)) {
+ ret_val = aws_raise_error(AWS_ERROR_OPERATION_INTERUPTED);
+ goto cleanup;
+ }
+
+ if (ret_val) {
+ goto cleanup;
+ }
+
+ cleanup:
+ /* per https://man7.org/linux/man-pages/man3/realpath.3.html, realpath must be freed, if NULL was passed
+ * to the second argument. */
+ if (full_path) {
+ free((void *)full_path);
+ }
+ aws_byte_buf_clean_up(&relative_path);
+ }
+ }
+
+ closedir(dir);
+ return ret_val;
+}
+
+char aws_get_platform_directory_separator(void) {
+ return '/';
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_home_env_var, "HOME");
+
+struct aws_string *aws_get_home_directory(struct aws_allocator *allocator) {
+
+ /* ToDo: check getpwuid_r if environment check fails */
+ struct aws_string *home_env_var_value = NULL;
+ if (aws_get_environment_value(allocator, s_home_env_var, &home_env_var_value) == 0 && home_env_var_value != NULL) {
+ return home_env_var_value;
+ }
+
+ return NULL;
+}
+
+bool aws_path_exists(const struct aws_string *path) {
+ struct stat buffer;
+ return stat(aws_string_c_str(path), &buffer) == 0;
+}
+
+int aws_fseek(FILE *file, int64_t offset, int whence) {
+
+#ifdef AWS_HAVE_POSIX_LARGE_FILE_SUPPORT
+ int result = fseeko(file, offset, whence);
+#else
+ /* must use fseek(), which takes offset as a long */
+ if (offset < LONG_MIN || offset > LONG_MAX) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ int result = fseek(file, offset, whence);
+#endif /* AWS_HAVE_POSIX_LFS */
+
+ if (result != 0) {
+ return aws_translate_and_raise_io_error(errno);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_file_get_length(FILE *file, int64_t *length) {
+
+ struct stat file_stats;
+
+ int fd = fileno(file);
+ if (fd == -1) {
+ return aws_raise_error(AWS_ERROR_INVALID_FILE_HANDLE);
+ }
+
+ if (fstat(fd, &file_stats)) {
+ return aws_translate_and_raise_io_error(errno);
+ }
+
+ *length = file_stats.st_size;
+
+ return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/system_info.c b/contrib/restricted/aws/aws-c-common/source/posix/system_info.c
index 1311be4096..e841243fb1 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/system_info.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/system_info.c
@@ -8,11 +8,17 @@
#include <aws/common/byte_buf.h>
#include <aws/common/logging.h>
#include <aws/common/platform.h>
+#include <aws/common/private/dlloads.h>
#if defined(__FreeBSD__) || defined(__NetBSD__)
# define __BSD_VISIBLE 1
#endif
+#if defined(__linux__) || defined(__unix__)
+# include <sys/sysinfo.h>
+# include <sys/types.h>
+#endif
+
#include <unistd.h>
#if defined(HAVE_SYSCONF)
@@ -39,6 +45,74 @@ size_t aws_system_info_processor_count(void) {
#include <ctype.h>
#include <fcntl.h>
+uint16_t aws_get_cpu_group_count(void) {
+ if (g_numa_num_configured_nodes_ptr) {
+ return (uint16_t)g_numa_num_configured_nodes_ptr();
+ }
+
+ return 1u;
+}
+
+size_t aws_get_cpu_count_for_group(uint16_t group_idx) {
+ if (g_numa_node_of_cpu_ptr) {
+ size_t total_cpus = aws_system_info_processor_count();
+
+ uint16_t cpu_count = 0;
+ for (size_t i = 0; i < total_cpus; ++i) {
+ if (group_idx == g_numa_node_of_cpu_ptr((int)i)) {
+ cpu_count++;
+ }
+ }
+ return cpu_count;
+ }
+
+ return aws_system_info_processor_count();
+}
+
+void aws_get_cpu_ids_for_group(uint16_t group_idx, struct aws_cpu_info *cpu_ids_array, size_t cpu_ids_array_length) {
+ AWS_PRECONDITION(cpu_ids_array);
+
+ if (!cpu_ids_array_length) {
+ return;
+ }
+
+ /* go ahead and initialize everything. */
+ for (size_t i = 0; i < cpu_ids_array_length; ++i) {
+ cpu_ids_array[i].cpu_id = -1;
+ cpu_ids_array[i].suspected_hyper_thread = false;
+ }
+
+ if (g_numa_node_of_cpu_ptr) {
+ size_t total_cpus = aws_system_info_processor_count();
+ size_t current_array_idx = 0;
+ for (size_t i = 0; i < total_cpus && current_array_idx < cpu_ids_array_length; ++i) {
+ if ((int)group_idx == g_numa_node_of_cpu_ptr((int)i)) {
+ cpu_ids_array[current_array_idx].cpu_id = (int32_t)i;
+
+ /* looking for an index jump is a more reliable way to find these. If they're in the group and then
+ * the index jumps, say from 17 to 36, we're most-likely in hyper-thread land. Also, inside a node,
+ * once we find the first hyper-thread, the remaining cores are also likely hyper threads. */
+ if (current_array_idx > 0 && (cpu_ids_array[current_array_idx - 1].suspected_hyper_thread ||
+ cpu_ids_array[current_array_idx - 1].cpu_id < ((int)i - 1))) {
+ cpu_ids_array[current_array_idx].suspected_hyper_thread = true;
+ }
+ current_array_idx += 1;
+ }
+ }
+
+ return;
+ }
+
+ /* a crude hint, but hyper-threads are numbered as the second half of the cpu id listing. The assumption if you
+ * hit here is that this is just listing all cpus on the system. */
+ size_t hyper_thread_hint = cpu_ids_array_length / 2 - 1;
+
+ for (size_t i = 0; i < cpu_ids_array_length; ++i) {
+ cpu_ids_array[i].cpu_id = (int32_t)i;
+ cpu_ids_array[i].suspected_hyper_thread = i > hyper_thread_hint;
+ }
+}
+
bool aws_is_debugger_present(void) {
/* Open the status file */
const int status_fd = open("/proc/self/status", O_RDONLY);
@@ -124,7 +198,7 @@ char *s_whitelist_chars(char *path) {
# include <dlfcn.h>
# include <mach-o/dyld.h>
static char s_exe_path[PATH_MAX];
-const char *s_get_executable_path() {
+static const char *s_get_executable_path(void) {
static const char *s_exe = NULL;
if (AWS_LIKELY(s_exe)) {
return s_exe;
@@ -312,43 +386,7 @@ void aws_backtrace_print(FILE *fp, void *call_site_data) {
}
fprintf(fp, "################################################################################\n");
- fprintf(fp, "Resolved stacktrace:\n");
- fprintf(fp, "################################################################################\n");
- /* symbols look like: <exe-or-shared-lib>(<function>+<addr>) [0x<addr>]
- * or: <exe-or-shared-lib> [0x<addr>]
- * or: [0x<addr>]
- * start at 1 to skip the current frame (this function) */
- for (size_t frame_idx = 1; frame_idx < stack_depth; ++frame_idx) {
- struct aws_stack_frame_info frame;
- AWS_ZERO_STRUCT(frame);
- const char *symbol = symbols[frame_idx];
- if (s_parse_symbol(symbol, stack_frames[frame_idx], &frame)) {
- goto parse_failed;
- }
-
- /* TODO: Emulate libunwind */
- char cmd[sizeof(struct aws_stack_frame_info)] = {0};
- s_resolve_cmd(cmd, sizeof(cmd), &frame);
- FILE *out = popen(cmd, "r");
- if (!out) {
- goto parse_failed;
- }
- char output[1024];
- if (fgets(output, sizeof(output), out)) {
- /* if addr2line or atos don't know what to do with an address, they just echo it */
- /* if there are spaces in the output, then they resolved something */
- if (strstr(output, " ")) {
- symbol = output;
- }
- }
- pclose(out);
-
- parse_failed:
- fprintf(fp, "%s%s", symbol, (symbol == symbols[frame_idx]) ? "\n" : "");
- }
-
- fprintf(fp, "################################################################################\n");
- fprintf(fp, "Raw stacktrace:\n");
+ fprintf(fp, "Stack trace:\n");
fprintf(fp, "################################################################################\n");
for (size_t frame_idx = 1; frame_idx < stack_depth; ++frame_idx) {
const char *symbol = symbols[frame_idx];
@@ -359,6 +397,21 @@ void aws_backtrace_print(FILE *fp, void *call_site_data) {
free(symbols);
}
+void aws_backtrace_log(int log_level) {
+ void *stack_frames[AWS_BACKTRACE_DEPTH];
+ size_t num_frames = aws_backtrace(stack_frames, AWS_BACKTRACE_DEPTH);
+ if (!num_frames) {
+ AWS_LOGF(log_level, AWS_LS_COMMON_GENERAL, "Unable to capture backtrace");
+ return;
+ }
+ char **symbols = aws_backtrace_symbols(stack_frames, num_frames);
+ for (size_t line = 0; line < num_frames; ++line) {
+ const char *symbol = symbols[line];
+ AWS_LOGF(log_level, AWS_LS_COMMON_GENERAL, "%s", symbol);
+ }
+ free(symbols);
+}
+
#else
void aws_backtrace_print(FILE *fp, void *call_site_data) {
(void)call_site_data;
@@ -382,21 +435,11 @@ char **aws_backtrace_addr2line(void *const *stack_frames, size_t stack_depth) {
(void)stack_depth;
return NULL;
}
-#endif /* AWS_HAVE_EXECINFO */
-void aws_backtrace_log() {
- void *stack_frames[1024];
- size_t num_frames = aws_backtrace(stack_frames, 1024);
- if (!num_frames) {
- return;
- }
- char **symbols = aws_backtrace_addr2line(stack_frames, num_frames);
- for (size_t line = 0; line < num_frames; ++line) {
- const char *symbol = symbols[line];
- AWS_LOGF_TRACE(AWS_LS_COMMON_GENERAL, "%s", symbol);
- }
- free(symbols);
+void aws_backtrace_log(int log_level) {
+ AWS_LOGF(log_level, AWS_LS_COMMON_GENERAL, "aws_backtrace_log: no execinfo compatible backtrace API available");
}
+#endif /* AWS_HAVE_EXECINFO */
#if defined(AWS_OS_APPLE)
enum aws_platform_os aws_get_platform_build_os(void) {
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/thread.c b/contrib/restricted/aws/aws-c-common/source/posix/thread.c
index 064d16882f..4f742afe02 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/thread.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/thread.c
@@ -8,8 +8,10 @@
#endif
#include <aws/common/clock.h>
+#include <aws/common/linked_list.h>
#include <aws/common/logging.h>
#include <aws/common/private/dlloads.h>
+#include <aws/common/private/thread_shared.h>
#include <aws/common/thread.h>
#include <dlfcn.h>
@@ -25,10 +27,29 @@
typedef cpuset_t cpu_set_t;
#endif
+#if !defined(AWS_AFFINITY_METHOD)
+# error "Must provide a method for setting thread affinity"
+#endif
+
+// Possible methods for setting thread affinity
+#define AWS_AFFINITY_METHOD_NONE 0
+#define AWS_AFFINITY_METHOD_PTHREAD_ATTR 1
+#define AWS_AFFINITY_METHOD_PTHREAD 2
+
+// Ensure provided affinity method matches one of the supported values
+// clang-format off
+#if AWS_AFFINITY_METHOD != AWS_AFFINITY_METHOD_NONE \
+ && AWS_AFFINITY_METHOD != AWS_AFFINITY_METHOD_PTHREAD_ATTR \
+ && AWS_AFFINITY_METHOD != AWS_AFFINITY_METHOD_PTHREAD
+// clang-format on
+# error "Invalid thread affinity method"
+#endif
+
static struct aws_thread_options s_default_options = {
/* this will make sure platform default stack size is used. */
.stack_size = 0,
.cpu_id = -1,
+ .join_strategy = AWS_TJS_MANUAL,
};
struct thread_atexit_callback {
@@ -39,44 +60,92 @@ struct thread_atexit_callback {
struct thread_wrapper {
struct aws_allocator *allocator;
+ struct aws_linked_list_node node;
void (*func)(void *arg);
void *arg;
struct thread_atexit_callback *atexit;
void (*call_once)(void *);
void *once_arg;
- struct aws_thread *thread;
+
+ /*
+ * The managed thread system does lazy joins on threads once finished via their wrapper. For that to work
+ * we need something to join against, so we keep a by-value copy of the original thread here. The tricky part
+ * is how to set the threadid/handle of this copy since the copy must be injected into the thread function before
+ * the threadid/handle is known. We get around that by just querying it at the top of the wrapper thread function.
+ */
+ struct aws_thread thread_copy;
bool membind;
};
static AWS_THREAD_LOCAL struct thread_wrapper *tl_wrapper = NULL;
+/*
+ * thread_wrapper is platform-dependent so this function ends up being duplicated in each thread implementation
+ */
+void aws_thread_join_and_free_wrapper_list(struct aws_linked_list *wrapper_list) {
+ struct aws_linked_list_node *iter = aws_linked_list_begin(wrapper_list);
+ while (iter != aws_linked_list_end(wrapper_list)) {
+
+ struct thread_wrapper *join_thread_wrapper = AWS_CONTAINER_OF(iter, struct thread_wrapper, node);
+
+ /*
+ * Can't do a for-loop since we need to advance to the next wrapper before we free the wrapper
+ */
+ iter = aws_linked_list_next(iter);
+
+ join_thread_wrapper->thread_copy.detach_state = AWS_THREAD_JOINABLE;
+ aws_thread_join(&join_thread_wrapper->thread_copy);
+
+ /*
+ * This doesn't actually do anything when using posix threads, but it keeps us
+ * in sync with the Windows version as well as the lifecycle contract we're
+ * presenting for threads.
+ */
+ aws_thread_clean_up(&join_thread_wrapper->thread_copy);
+
+ aws_mem_release(join_thread_wrapper->allocator, join_thread_wrapper);
+
+ aws_thread_decrement_unjoined_count();
+ }
+}
+
static void *thread_fn(void *arg) {
- struct thread_wrapper wrapper = *(struct thread_wrapper *)arg;
+ struct thread_wrapper *wrapper_ptr = arg;
+
+ /*
+ * Make sure the aws_thread copy has the right thread id stored in it.
+ */
+ wrapper_ptr->thread_copy.thread_id = aws_thread_current_thread_id();
+
+ struct thread_wrapper wrapper = *wrapper_ptr;
struct aws_allocator *allocator = wrapper.allocator;
tl_wrapper = &wrapper;
+
if (wrapper.membind && g_set_mempolicy_ptr) {
AWS_LOGF_INFO(
AWS_LS_COMMON_THREAD,
- "id=%p: a cpu affinity was specified when launching this thread and set_mempolicy() is available on this "
- "system. Setting the memory policy to MPOL_PREFERRED",
- (void *)tl_wrapper->thread);
+ "a cpu affinity was specified when launching this thread and set_mempolicy() is available on this "
+ "system. Setting the memory policy to MPOL_PREFERRED");
/* if a user set a cpu id in their thread options, we're going to make sure the numa policy honors that
* and makes sure the numa node of the cpu we launched this thread on is where memory gets allocated. However,
* we don't want to fail the application if this fails, so make the call, and ignore the result. */
long resp = g_set_mempolicy_ptr(AWS_MPOL_PREFERRED_ALIAS, NULL, 0);
if (resp) {
- AWS_LOGF_WARN(
- AWS_LS_COMMON_THREAD,
- "id=%p: call to set_mempolicy() failed with errno %d",
- (void *)wrapper.thread,
- errno);
+ AWS_LOGF_WARN(AWS_LS_COMMON_THREAD, "call to set_mempolicy() failed with errno %d", errno);
}
}
wrapper.func(wrapper.arg);
- struct thread_atexit_callback *exit_callback_data = wrapper.atexit;
- aws_mem_release(allocator, arg);
+ /*
+ * Managed threads don't free the wrapper yet. The thread management system does it later after the thread
+ * is joined.
+ */
+ bool is_managed_thread = wrapper.thread_copy.detach_state == AWS_THREAD_MANAGED;
+ if (!is_managed_thread) {
+ aws_mem_release(allocator, arg);
+ }
+ struct thread_atexit_callback *exit_callback_data = wrapper.atexit;
while (exit_callback_data) {
aws_thread_atexit_fn *exit_callback = exit_callback_data->callback;
void *exit_callback_user_data = exit_callback_data->user_data;
@@ -89,6 +158,13 @@ static void *thread_fn(void *arg) {
}
tl_wrapper = NULL;
+ /*
+ * Release this thread to the managed thread system for lazy join.
+ */
+ if (is_managed_thread) {
+ aws_thread_pending_join_add(&wrapper_ptr->node);
+ }
+
return NULL;
}
@@ -138,6 +214,10 @@ int aws_thread_launch(
pthread_attr_t *attributes_ptr = NULL;
int attr_return = 0;
int allocation_failed = 0;
+ bool is_managed_thread = options != NULL && options->join_strategy == AWS_TJS_MANAGED;
+ if (is_managed_thread) {
+ thread->detach_state = AWS_THREAD_MANAGED;
+ }
if (options) {
attr_return = pthread_attr_init(&attributes);
@@ -160,7 +240,7 @@ int aws_thread_launch(
* NUMA or not is setup in interleave mode.
* Thread afinity is also not supported on Android systems, and honestly, if you're running android on a NUMA
* configuration, you've got bigger problems. */
-#if !defined(__MACH__) && !defined(__ANDROID__) && !defined(_musl_)
+#if AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD_ATTR
if (options->cpu_id >= 0) {
AWS_LOGF_INFO(
AWS_LS_COMMON_THREAD,
@@ -183,7 +263,7 @@ int aws_thread_launch(
goto cleanup;
}
}
-#endif /* !defined(__MACH__) && !defined(__ANDROID__) */
+#endif /* AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD_ATTR */
}
struct thread_wrapper *wrapper =
@@ -198,17 +278,58 @@ int aws_thread_launch(
wrapper->membind = true;
}
- wrapper->thread = thread;
+ wrapper->thread_copy = *thread;
wrapper->allocator = thread->allocator;
wrapper->func = func;
wrapper->arg = arg;
+
+ /*
+ * Increment the count prior to spawning the thread. Decrement back if the create failed.
+ */
+ if (is_managed_thread) {
+ aws_thread_increment_unjoined_count();
+ }
+
attr_return = pthread_create(&thread->thread_id, attributes_ptr, thread_fn, (void *)wrapper);
if (attr_return) {
+ if (is_managed_thread) {
+ aws_thread_decrement_unjoined_count();
+ }
goto cleanup;
}
- thread->detach_state = AWS_THREAD_JOINABLE;
+#if AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD
+ /* If we don't have pthread_attr_setaffinity_np, we may
+ * still be able to set the thread affinity after creation. */
+ if (options && options->cpu_id >= 0) {
+ AWS_LOGF_INFO(
+ AWS_LS_COMMON_THREAD,
+ "id=%p: cpu affinity of cpu_id %d was specified, attempting to honor the value.",
+ (void *)thread,
+ options->cpu_id);
+
+ cpu_set_t cpuset;
+ CPU_ZERO(&cpuset);
+ CPU_SET((uint32_t)options->cpu_id, &cpuset);
+
+ attr_return = pthread_setaffinity_np(thread->thread_id, sizeof(cpuset), &cpuset);
+ if (attr_return) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_THREAD, "id=%p: pthread_setaffinity_np() failed with %d.", (void *)thread, errno);
+ goto cleanup;
+ }
+ }
+#endif /* AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD */
+ /*
+ * Managed threads need to stay unjoinable from an external perspective. We'll handle it after thread function
+ * completion.
+ */
+ if (is_managed_thread) {
+ aws_thread_clean_up(thread);
+ } else {
+ thread->detach_state = AWS_THREAD_JOINABLE;
+ }
cleanup:
if (attributes_ptr) {
diff --git a/contrib/restricted/aws/aws-c-common/source/priority_queue.c b/contrib/restricted/aws/aws-c-common/source/priority_queue.c
index 14ff421d5f..f7d0f54e2d 100644
--- a/contrib/restricted/aws/aws-c-common/source/priority_queue.c
+++ b/contrib/restricted/aws/aws-c-common/source/priority_queue.c
@@ -100,7 +100,7 @@ static bool s_sift_up(struct aws_priority_queue *queue, size_t index) {
bool did_move = false;
- void *parent_item, *child_item;
+ void *parent_item = NULL, *child_item = NULL;
size_t parent = PARENT_OF(index);
while (index) {
/*
diff --git a/contrib/restricted/aws/aws-c-common/source/process_common.c b/contrib/restricted/aws/aws-c-common/source/process_common.c
index 9b734c46f8..ef432374b8 100644
--- a/contrib/restricted/aws/aws-c-common/source/process_common.c
+++ b/contrib/restricted/aws/aws-c-common/source/process_common.c
@@ -27,6 +27,18 @@ void aws_run_command_result_cleanup(struct aws_run_command_result *result) {
aws_string_destroy_secure(result->std_err);
}
+#if defined(AWS_OS_WINDOWS) && !defined(AWS_OS_WINDOWS_DESKTOP)
+int aws_run_command(
+ struct aws_allocator *allocator,
+ struct aws_run_command_options *options,
+ struct aws_run_command_result *result) {
+ (void)allocator;
+ (void)options;
+ (void)result;
+ return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION);
+}
+#else
+
int aws_run_command(
struct aws_allocator *allocator,
struct aws_run_command_options *options,
@@ -44,11 +56,11 @@ int aws_run_command(
goto on_finish;
}
-#ifdef _WIN32
+# if defined(AWS_OS_WINDOWS)
output_stream = _popen(options->command, "r");
-#else
+# else
output_stream = popen(options->command, "r");
-#endif
+# endif
if (output_stream) {
while (!feof(output_stream)) {
@@ -59,11 +71,11 @@ int aws_run_command(
}
}
}
-#ifdef _WIN32
+# if defined(AWS_OS_WINDOWS)
result->ret_code = _pclose(output_stream);
-#else
+# else
result->ret_code = pclose(output_stream);
-#endif
+# endif
}
struct aws_byte_cursor trim_cursor = aws_byte_cursor_from_buf(&result_buffer);
@@ -80,3 +92,4 @@ on_finish:
aws_byte_buf_clean_up_secure(&result_buffer);
return ret;
}
+#endif /* !AWS_OS_WINDOWS */
diff --git a/contrib/restricted/aws/aws-c-common/source/promise.c b/contrib/restricted/aws/aws-c-common/source/promise.c
new file mode 100644
index 0000000000..444623d625
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/promise.c
@@ -0,0 +1,115 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/condition_variable.h>
+#include <aws/common/mutex.h>
+#include <aws/common/promise.h>
+#include <aws/common/ref_count.h>
+
+struct aws_promise {
+ struct aws_allocator *allocator;
+ struct aws_mutex mutex;
+ struct aws_condition_variable cv;
+ struct aws_ref_count rc;
+ bool complete;
+ int error_code;
+ void *value;
+
+ /* destructor for value, will be invoked if the value is not taken */
+ void (*dtor)(void *);
+};
+
+static void s_aws_promise_dtor(void *ptr) {
+ struct aws_promise *promise = ptr;
+ aws_condition_variable_clean_up(&promise->cv);
+ aws_mutex_clean_up(&promise->mutex);
+ if (promise->value && promise->dtor) {
+ promise->dtor(promise->value);
+ }
+ aws_mem_release(promise->allocator, promise);
+}
+
+struct aws_promise *aws_promise_new(struct aws_allocator *allocator) {
+ struct aws_promise *promise = aws_mem_calloc(allocator, 1, sizeof(struct aws_promise));
+ promise->allocator = allocator;
+ aws_ref_count_init(&promise->rc, promise, s_aws_promise_dtor);
+ aws_mutex_init(&promise->mutex);
+ aws_condition_variable_init(&promise->cv);
+ return promise;
+}
+
+struct aws_promise *aws_promise_acquire(struct aws_promise *promise) {
+ aws_ref_count_acquire(&promise->rc);
+ return promise;
+}
+
+void aws_promise_release(struct aws_promise *promise) {
+ aws_ref_count_release(&promise->rc);
+}
+
+static bool s_promise_completed(void *user_data) {
+ struct aws_promise *promise = user_data;
+ return promise->complete;
+}
+
+void aws_promise_wait(struct aws_promise *promise) {
+ aws_mutex_lock(&promise->mutex);
+ aws_condition_variable_wait_pred(&promise->cv, &promise->mutex, s_promise_completed, promise);
+ aws_mutex_unlock(&promise->mutex);
+}
+
+bool aws_promise_wait_for(struct aws_promise *promise, size_t nanoseconds) {
+ aws_mutex_lock(&promise->mutex);
+ aws_condition_variable_wait_for_pred(
+ &promise->cv, &promise->mutex, (int64_t)nanoseconds, s_promise_completed, promise);
+ const bool complete = promise->complete;
+ aws_mutex_unlock(&promise->mutex);
+ return complete;
+}
+
+bool aws_promise_is_complete(struct aws_promise *promise) {
+ aws_mutex_lock(&promise->mutex);
+ const bool complete = promise->complete;
+ aws_mutex_unlock(&promise->mutex);
+ return complete;
+}
+
+void aws_promise_complete(struct aws_promise *promise, void *value, void (*dtor)(void *)) {
+ aws_mutex_lock(&promise->mutex);
+ AWS_FATAL_ASSERT(!promise->complete && "aws_promise_complete: cannot complete a promise more than once");
+ promise->value = value;
+ promise->dtor = dtor;
+ promise->complete = true;
+ aws_mutex_unlock(&promise->mutex);
+ aws_condition_variable_notify_all(&promise->cv);
+}
+
+void aws_promise_fail(struct aws_promise *promise, int error_code) {
+ AWS_FATAL_ASSERT(error_code != 0 && "aws_promise_fail: cannot fail a promise with a 0 error_code");
+ aws_mutex_lock(&promise->mutex);
+ AWS_FATAL_ASSERT(!promise->complete && "aws_promise_fail: cannot complete a promise more than once");
+ promise->error_code = error_code;
+ promise->complete = true;
+ aws_mutex_unlock(&promise->mutex);
+ aws_condition_variable_notify_all(&promise->cv);
+}
+
+int aws_promise_error_code(struct aws_promise *promise) {
+ AWS_FATAL_ASSERT(aws_promise_is_complete(promise));
+ return promise->error_code;
+}
+
+void *aws_promise_value(struct aws_promise *promise) {
+ AWS_FATAL_ASSERT(aws_promise_is_complete(promise));
+ return promise->value;
+}
+
+void *aws_promise_take_value(struct aws_promise *promise) {
+ AWS_FATAL_ASSERT(aws_promise_is_complete(promise));
+ void *value = promise->value;
+ promise->value = NULL;
+ promise->dtor = NULL;
+ return value;
+}
diff --git a/contrib/restricted/aws/aws-c-common/source/ref_count.c b/contrib/restricted/aws/aws-c-common/source/ref_count.c
index a1d938b022..1e90e4c8a1 100644
--- a/contrib/restricted/aws/aws-c-common/source/ref_count.c
+++ b/contrib/restricted/aws/aws-c-common/source/ref_count.c
@@ -15,7 +15,9 @@ void aws_ref_count_init(struct aws_ref_count *ref_count, void *object, aws_simpl
}
void *aws_ref_count_acquire(struct aws_ref_count *ref_count) {
- aws_atomic_fetch_add(&ref_count->ref_count, 1);
+ size_t old_value = aws_atomic_fetch_add(&ref_count->ref_count, 1);
+ AWS_ASSERT(old_value > 0 && "refcount has been zero, it's invalid to use it again.");
+ (void)old_value;
return ref_count->object;
}
@@ -29,52 +31,3 @@ size_t aws_ref_count_release(struct aws_ref_count *ref_count) {
return old_value - 1;
}
-
-static struct aws_condition_variable s_global_thread_signal = AWS_CONDITION_VARIABLE_INIT;
-static struct aws_mutex s_global_thread_lock = AWS_MUTEX_INIT;
-static uint32_t s_global_thread_count = 0;
-
-void aws_global_thread_creator_increment(void) {
- aws_mutex_lock(&s_global_thread_lock);
- ++s_global_thread_count;
- aws_mutex_unlock(&s_global_thread_lock);
-}
-
-void aws_global_thread_creator_decrement(void) {
- bool signal = false;
- aws_mutex_lock(&s_global_thread_lock);
- AWS_ASSERT(s_global_thread_count != 0 && "global tracker has gone negative");
- --s_global_thread_count;
- if (s_global_thread_count == 0) {
- signal = true;
- }
- aws_mutex_unlock(&s_global_thread_lock);
-
- if (signal) {
- aws_condition_variable_notify_all(&s_global_thread_signal);
- }
-}
-
-static bool s_thread_count_zero_pred(void *user_data) {
- (void)user_data;
-
- return s_global_thread_count == 0;
-}
-
-void aws_global_thread_creator_shutdown_wait(void) {
- aws_mutex_lock(&s_global_thread_lock);
- aws_condition_variable_wait_pred(&s_global_thread_signal, &s_global_thread_lock, s_thread_count_zero_pred, NULL);
- aws_mutex_unlock(&s_global_thread_lock);
-}
-
-int aws_global_thread_creator_shutdown_wait_for(uint32_t wait_timeout_in_seconds) {
- int64_t wait_time_in_nanos =
- aws_timestamp_convert(wait_timeout_in_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL);
-
- aws_mutex_lock(&s_global_thread_lock);
- int result = aws_condition_variable_wait_for_pred(
- &s_global_thread_signal, &s_global_thread_lock, wait_time_in_nanos, s_thread_count_zero_pred, NULL);
- aws_mutex_unlock(&s_global_thread_lock);
-
- return result;
-}
diff --git a/contrib/restricted/aws/aws-c-common/source/resource_name.c b/contrib/restricted/aws/aws-c-common/source/resource_name.c
deleted file mode 100644
index 0a7b972ea1..0000000000
--- a/contrib/restricted/aws/aws-c-common/source/resource_name.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/resource_name.h>
-
-#define ARN_SPLIT_COUNT ((size_t)5)
-#define ARN_PARTS_COUNT ((size_t)6)
-
-static const char ARN_DELIMETER[] = ":";
-static const char ARN_DELIMETER_CHAR = ':';
-
-static const size_t DELIMETER_LEN = 8; /* strlen("arn:::::") */
-
-AWS_COMMON_API
-int aws_resource_name_init_from_cur(struct aws_resource_name *arn, const struct aws_byte_cursor *input) {
- struct aws_byte_cursor arn_parts[ARN_PARTS_COUNT];
- struct aws_array_list arn_part_list;
- aws_array_list_init_static(&arn_part_list, arn_parts, ARN_PARTS_COUNT, sizeof(struct aws_byte_cursor));
- if (aws_byte_cursor_split_on_char_n(input, ARN_DELIMETER_CHAR, ARN_SPLIT_COUNT, &arn_part_list)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
-
- struct aws_byte_cursor *arn_prefix;
- if (aws_array_list_get_at_ptr(&arn_part_list, (void **)&arn_prefix, 0) ||
- !aws_byte_cursor_eq_c_str(arn_prefix, "arn")) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->partition, 1)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->service, 2)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->region, 3)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->account_id, 4) || aws_byte_cursor_eq_c_str(&arn->account_id, "")) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->resource_id, 5)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- return AWS_OP_SUCCESS;
-}
-
-AWS_COMMON_API
-int aws_resource_name_length(const struct aws_resource_name *arn, size_t *size) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id));
-
- *size = arn->partition.len + arn->region.len + arn->service.len + arn->account_id.len + arn->resource_id.len +
- DELIMETER_LEN;
-
- return AWS_OP_SUCCESS;
-}
-
-AWS_COMMON_API
-int aws_byte_buf_append_resource_name(struct aws_byte_buf *buf, const struct aws_resource_name *arn) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id));
-
- const struct aws_byte_cursor prefix = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:");
- const struct aws_byte_cursor colon_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(ARN_DELIMETER);
-
- if (aws_byte_buf_append(buf, &prefix)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &arn->partition)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &colon_cur)) {
- return aws_raise_error(aws_last_error());
- }
-
- if (aws_byte_buf_append(buf, &arn->service)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &colon_cur)) {
- return aws_raise_error(aws_last_error());
- }
-
- if (aws_byte_buf_append(buf, &arn->region)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &colon_cur)) {
- return aws_raise_error(aws_last_error());
- }
-
- if (aws_byte_buf_append(buf, &arn->account_id)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &colon_cur)) {
- return aws_raise_error(aws_last_error());
- }
-
- if (aws_byte_buf_append(buf, &arn->resource_id)) {
- return aws_raise_error(aws_last_error());
- }
-
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
- return AWS_OP_SUCCESS;
-}
diff --git a/contrib/restricted/aws/aws-c-common/source/ring_buffer.c b/contrib/restricted/aws/aws-c-common/source/ring_buffer.c
index 6ebecebf47..bcc8ffaad3 100644
--- a/contrib/restricted/aws/aws-c-common/source/ring_buffer.c
+++ b/contrib/restricted/aws/aws-c-common/source/ring_buffer.c
@@ -73,7 +73,7 @@ int aws_ring_buffer_acquire(struct aws_ring_buffer *ring_buf, size_t requested_s
/* this branch is, we don't have any vended buffers. */
if (head_cpy == tail_cpy) {
- size_t ring_space = ring_buf->allocation_end - ring_buf->allocation;
+ size_t ring_space = ring_buf->allocation_end == NULL ? 0 : ring_buf->allocation_end - ring_buf->allocation;
if (requested_size > ring_space) {
AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
@@ -147,7 +147,7 @@ int aws_ring_buffer_acquire_up_to(
/* this branch is, we don't have any vended buffers. */
if (head_cpy == tail_cpy) {
- size_t ring_space = ring_buf->allocation_end - ring_buf->allocation;
+ size_t ring_space = ring_buf->allocation_end == NULL ? 0 : ring_buf->allocation_end - ring_buf->allocation;
size_t allocation_size = ring_space > requested_size ? requested_size : ring_space;
@@ -232,10 +232,11 @@ static inline bool s_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buff
#ifdef CBMC
/* only continue if buf points-into ring_buffer because comparison of pointers to different objects is undefined
* (C11 6.5.8) */
- if (!__CPROVER_same_object(buf->buffer, ring_buffer->allocation) ||
- !__CPROVER_same_object(buf->buffer, ring_buffer->allocation_end - 1)) {
- return false;
- }
+ return (
+ __CPROVER_same_object(buf->buffer, ring_buffer->allocation) &&
+ AWS_IMPLIES(
+ ring_buffer->allocation_end != NULL, __CPROVER_same_object(buf->buffer, ring_buffer->allocation_end - 1)));
+
#endif
return buf->buffer && ring_buffer->allocation && ring_buffer->allocation_end &&
buf->buffer >= ring_buffer->allocation && buf->buffer + buf->capacity <= ring_buffer->allocation_end;
@@ -258,64 +259,3 @@ bool aws_ring_buffer_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buff
AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
return rval;
}
-
-/* Ring buffer allocator implementation */
-static void *s_ring_buffer_mem_acquire(struct aws_allocator *allocator, size_t size) {
- struct aws_ring_buffer *buffer = allocator->impl;
- struct aws_byte_buf buf;
- AWS_ZERO_STRUCT(buf);
- /* allocate extra space for the size */
- if (aws_ring_buffer_acquire(buffer, size + sizeof(size_t), &buf)) {
- return NULL;
- }
- /* store the size ahead of the allocation */
- *((size_t *)buf.buffer) = buf.capacity;
- return buf.buffer + sizeof(size_t);
-}
-
-static void s_ring_buffer_mem_release(struct aws_allocator *allocator, void *ptr) {
- /* back up to where the size is stored */
- const void *addr = ((uint8_t *)ptr - sizeof(size_t));
- const size_t size = *((size_t *)addr);
-
- struct aws_byte_buf buf = aws_byte_buf_from_array(addr, size);
- buf.allocator = allocator;
-
- struct aws_ring_buffer *buffer = allocator->impl;
- aws_ring_buffer_release(buffer, &buf);
-}
-
-static void *s_ring_buffer_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
- void *mem = s_ring_buffer_mem_acquire(allocator, num * size);
- if (!mem) {
- return NULL;
- }
- memset(mem, 0, num * size);
- return mem;
-}
-
-static void *s_ring_buffer_mem_realloc(struct aws_allocator *allocator, void *ptr, size_t old_size, size_t new_size) {
- (void)allocator;
- (void)ptr;
- (void)old_size;
- (void)new_size;
- AWS_FATAL_ASSERT(!"ring_buffer_allocator does not support realloc, as it breaks allocation ordering");
- return NULL;
-}
-
-int aws_ring_buffer_allocator_init(struct aws_allocator *allocator, struct aws_ring_buffer *ring_buffer) {
- if (allocator == NULL || ring_buffer == NULL) {
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- allocator->impl = ring_buffer;
- allocator->mem_acquire = s_ring_buffer_mem_acquire;
- allocator->mem_release = s_ring_buffer_mem_release;
- allocator->mem_calloc = s_ring_buffer_mem_calloc;
- allocator->mem_realloc = s_ring_buffer_mem_realloc;
- return AWS_OP_SUCCESS;
-}
-
-void aws_ring_buffer_allocator_clean_up(struct aws_allocator *allocator) {
- AWS_ZERO_STRUCT(*allocator);
-}
diff --git a/contrib/restricted/aws/aws-c-common/source/string.c b/contrib/restricted/aws/aws-c-common/source/string.c
index d1abf0dbff..a3d2c204ed 100644
--- a/contrib/restricted/aws/aws-c-common/source/string.c
+++ b/contrib/restricted/aws/aws-c-common/source/string.c
@@ -4,6 +4,183 @@
*/
#include <aws/common/string.h>
+#ifdef _WIN32
+# include <windows.h>
+
+struct aws_wstring *aws_string_convert_to_wstring(
+ struct aws_allocator *allocator,
+ const struct aws_string *to_convert) {
+ AWS_PRECONDITION(to_convert);
+
+ struct aws_byte_cursor convert_cur = aws_byte_cursor_from_string(to_convert);
+ return aws_string_convert_to_wchar_from_byte_cursor(allocator, &convert_cur);
+}
+
+struct aws_wstring *aws_string_convert_to_wchar_from_byte_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *to_convert) {
+ AWS_PRECONDITION(to_convert);
+
+ /* if a length is passed for the to_convert string, converted size does not include the null terminator,
+ * which is a good thing. */
+ int converted_size = MultiByteToWideChar(CP_UTF8, 0, (const char *)to_convert->ptr, (int)to_convert->len, NULL, 0);
+
+ if (!converted_size) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ size_t str_len_size = 0;
+ size_t malloc_size = 0;
+
+ /* double the size because the return value above is # of characters, not bytes size. */
+ if (aws_mul_size_checked(sizeof(wchar_t), converted_size, &str_len_size)) {
+ return NULL;
+ }
+
+ /* UTF-16, the NULL terminator is two bytes. */
+ if (aws_add_size_checked(sizeof(struct aws_wstring) + 2, str_len_size, &malloc_size)) {
+ return NULL;
+ }
+
+ struct aws_wstring *str = aws_mem_acquire(allocator, malloc_size);
+ if (!str) {
+ return NULL;
+ }
+
+ /* Fields are declared const, so we need to copy them in like this */
+ *(struct aws_allocator **)(&str->allocator) = allocator;
+ *(size_t *)(&str->len) = (size_t)converted_size;
+
+ int converted_res = MultiByteToWideChar(
+ CP_UTF8, 0, (const char *)to_convert->ptr, (int)to_convert->len, (wchar_t *)str->bytes, converted_size);
+ /* windows had its chance to do its thing, no take backsies. */
+ AWS_FATAL_ASSERT(converted_res > 0);
+
+ *(wchar_t *)&str->bytes[converted_size] = 0;
+ return str;
+}
+
+struct aws_wstring *aws_wstring_new_from_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *w_str_cur) {
+ AWS_PRECONDITION(allocator && aws_byte_cursor_is_valid(w_str_cur));
+ return aws_wstring_new_from_array(allocator, (wchar_t *)w_str_cur->ptr, w_str_cur->len / sizeof(wchar_t));
+}
+
+struct aws_wstring *aws_wstring_new_from_array(struct aws_allocator *allocator, const wchar_t *w_str, size_t len) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(AWS_MEM_IS_READABLE(w_str, len));
+
+ size_t str_byte_len = 0;
+ size_t malloc_size = 0;
+
+ /* double the size because the return value above is # of characters, not bytes size. */
+ if (aws_mul_size_checked(sizeof(wchar_t), len, &str_byte_len)) {
+ return NULL;
+ }
+
+ /* UTF-16, the NULL terminator is two bytes. */
+ if (aws_add_size_checked(sizeof(struct aws_wstring) + 2, str_byte_len, &malloc_size)) {
+ return NULL;
+ }
+
+ struct aws_wstring *str = aws_mem_acquire(allocator, malloc_size);
+
+ /* Fields are declared const, so we need to copy them in like this */
+ *(struct aws_allocator **)(&str->allocator) = allocator;
+ *(size_t *)(&str->len) = len;
+ if (len > 0) {
+ memcpy((void *)str->bytes, w_str, str_byte_len);
+ }
+ /* in case this is a utf-16 string in the array, allow that here. */
+ *(wchar_t *)&str->bytes[len] = 0;
+ AWS_RETURN_WITH_POSTCONDITION(str, aws_wstring_is_valid(str));
+}
+
+bool aws_wstring_is_valid(const struct aws_wstring *str) {
+ return str && AWS_MEM_IS_READABLE(&str->bytes[0], str->len + 1) && str->bytes[str->len] == 0;
+}
+
+void aws_wstring_destroy(struct aws_wstring *str) {
+ AWS_PRECONDITION(!str || aws_wstring_is_valid(str));
+ if (str && str->allocator) {
+ aws_mem_release(str->allocator, str);
+ }
+}
+
+static struct aws_string *s_convert_from_wchar(
+ struct aws_allocator *allocator,
+ const wchar_t *to_convert,
+ int len_chars) {
+ AWS_FATAL_PRECONDITION(to_convert);
+
+ int bytes_size = WideCharToMultiByte(CP_UTF8, 0, to_convert, len_chars, NULL, 0, NULL, NULL);
+
+ if (!bytes_size) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ size_t malloc_size = 0;
+
+ /* bytes_size already contains the space for the null terminator */
+ if (aws_add_size_checked(sizeof(struct aws_string), bytes_size, &malloc_size)) {
+ return NULL;
+ }
+
+ struct aws_string *str = aws_mem_acquire(allocator, malloc_size);
+ if (!str) {
+ return NULL;
+ }
+
+ /* Fields are declared const, so we need to copy them in like this */
+ *(struct aws_allocator **)(&str->allocator) = allocator;
+ *(size_t *)(&str->len) = (size_t)bytes_size - 1;
+
+ int converted_res =
+ WideCharToMultiByte(CP_UTF8, 0, to_convert, len_chars, (char *)str->bytes, bytes_size, NULL, NULL);
+ /* windows had its chance to do its thing, no take backsies. */
+ AWS_FATAL_ASSERT(converted_res > 0);
+
+ *(uint8_t *)&str->bytes[str->len] = 0;
+ return str;
+}
+
+struct aws_string *aws_string_convert_from_wchar_str(
+ struct aws_allocator *allocator,
+ const struct aws_wstring *to_convert) {
+ AWS_FATAL_PRECONDITION(to_convert);
+
+ return s_convert_from_wchar(allocator, aws_wstring_c_str(to_convert), (int)aws_wstring_num_chars(to_convert));
+}
+struct aws_string *aws_string_convert_from_wchar_c_str(struct aws_allocator *allocator, const wchar_t *to_convert) {
+ return s_convert_from_wchar(allocator, to_convert, -1);
+}
+
+const wchar_t *aws_wstring_c_str(const struct aws_wstring *str) {
+ AWS_PRECONDITION(str);
+ return str->bytes;
+}
+
+size_t aws_wstring_num_chars(const struct aws_wstring *str) {
+ AWS_PRECONDITION(str);
+
+ if (str->len == 0) {
+ return 0;
+ }
+
+ return str->len;
+}
+
+size_t aws_wstring_size_bytes(const struct aws_wstring *str) {
+ AWS_PRECONDITION(str);
+
+ return aws_wstring_num_chars(str) * sizeof(wchar_t);
+}
+
+#endif /* _WIN32 */
+
struct aws_string *aws_string_new_from_c_str(struct aws_allocator *allocator, const char *c_str) {
AWS_PRECONDITION(allocator && c_str);
return aws_string_new_from_array(allocator, (const uint8_t *)c_str, strlen(c_str));
@@ -27,7 +204,7 @@ struct aws_string *aws_string_new_from_array(struct aws_allocator *allocator, co
if (len > 0) {
memcpy((void *)str->bytes, bytes, len);
}
- *(uint8_t *)&str->bytes[len] = '\0';
+ *(uint8_t *)&str->bytes[len] = 0;
AWS_RETURN_WITH_POSTCONDITION(str, aws_string_is_valid(str));
}
diff --git a/contrib/restricted/aws/aws-c-common/source/task_scheduler.c b/contrib/restricted/aws/aws-c-common/source/task_scheduler.c
index 31ce7af1ab..4467b12493 100644
--- a/contrib/restricted/aws/aws-c-common/source/task_scheduler.c
+++ b/contrib/restricted/aws/aws-c-common/source/task_scheduler.c
@@ -40,6 +40,7 @@ void aws_task_run(struct aws_task *task, enum aws_task_status status) {
task->type_tag,
aws_task_status_to_c_str(status));
+ task->abi_extension.scheduled = false;
task->fn(task, task->arg, status);
}
@@ -139,6 +140,7 @@ void aws_task_scheduler_schedule_now(struct aws_task_scheduler *scheduler, struc
task->timestamp = 0;
aws_linked_list_push_back(&scheduler->asap_list, &task->node);
+ task->abi_extension.scheduled = true;
}
void aws_task_scheduler_schedule_future(
@@ -177,6 +179,7 @@ void aws_task_scheduler_schedule_future(
}
aws_linked_list_insert_before(node_i, &task->node);
}
+ task->abi_extension.scheduled = true;
}
void aws_task_scheduler_run_all(struct aws_task_scheduler *scheduler, uint64_t current_time) {
@@ -253,7 +256,7 @@ void aws_task_scheduler_cancel_task(struct aws_task_scheduler *scheduler, struct
*/
if (task->node.next) {
aws_linked_list_remove(&task->node);
- } else {
+ } else if (task->abi_extension.scheduled) {
aws_priority_queue_remove(&scheduler->timed_queue, &task, &task->priority_queue_node);
}
diff --git a/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c b/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c
new file mode 100644
index 0000000000..7999344b7b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c
@@ -0,0 +1,225 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/clock.h>
+#include <aws/common/condition_variable.h>
+#include <aws/common/mutex.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/task_scheduler.h>
+#include <aws/common/thread.h>
+#include <aws/common/thread_scheduler.h>
+
+struct aws_thread_scheduler {
+ struct aws_allocator *allocator;
+ struct aws_ref_count ref_count;
+ struct aws_thread thread;
+ struct aws_task_scheduler scheduler;
+ struct aws_atomic_var should_exit;
+
+ struct {
+ struct aws_linked_list scheduling_queue;
+ struct aws_linked_list cancel_queue;
+ struct aws_mutex mutex;
+ struct aws_condition_variable c_var;
+ } thread_data;
+};
+
+struct cancellation_node {
+ struct aws_task *task_to_cancel;
+ struct aws_linked_list_node node;
+};
+
+static void s_destroy_callback(void *arg) {
+ struct aws_thread_scheduler *scheduler = arg;
+ aws_atomic_store_int(&scheduler->should_exit, 1u);
+ aws_condition_variable_notify_all(&scheduler->thread_data.c_var);
+ aws_thread_join(&scheduler->thread);
+ aws_task_scheduler_clean_up(&scheduler->scheduler);
+ aws_condition_variable_clean_up(&scheduler->thread_data.c_var);
+ aws_mutex_clean_up(&scheduler->thread_data.mutex);
+ aws_thread_clean_up(&scheduler->thread);
+ aws_mem_release(scheduler->allocator, scheduler);
+}
+
+static bool s_thread_should_wake(void *arg) {
+ struct aws_thread_scheduler *scheduler = arg;
+
+ uint64_t current_time = 0;
+ aws_high_res_clock_get_ticks(&current_time);
+
+ uint64_t next_scheduled_task = 0;
+ aws_task_scheduler_has_tasks(&scheduler->scheduler, &next_scheduled_task);
+ return aws_atomic_load_int(&scheduler->should_exit) ||
+ !aws_linked_list_empty(&scheduler->thread_data.scheduling_queue) ||
+ !aws_linked_list_empty(&scheduler->thread_data.cancel_queue) || (next_scheduled_task <= current_time);
+}
+
+static void s_thread_fn(void *arg) {
+ struct aws_thread_scheduler *scheduler = arg;
+
+ while (!aws_atomic_load_int(&scheduler->should_exit)) {
+
+ /* move tasks from the mutex protected list to the scheduler. This is because we don't want to hold the lock
+ * for the scheduler during run_all and then try and acquire the lock from another thread to schedule something
+ * because that potentially would block the calling thread. */
+ struct aws_linked_list list_cpy;
+ aws_linked_list_init(&list_cpy);
+ struct aws_linked_list cancel_list_cpy;
+ aws_linked_list_init(&cancel_list_cpy);
+
+ AWS_FATAL_ASSERT(!aws_mutex_lock(&scheduler->thread_data.mutex) && "mutex lock failed!");
+ aws_linked_list_swap_contents(&scheduler->thread_data.scheduling_queue, &list_cpy);
+ aws_linked_list_swap_contents(&scheduler->thread_data.cancel_queue, &cancel_list_cpy);
+ AWS_FATAL_ASSERT(!aws_mutex_unlock(&scheduler->thread_data.mutex) && "mutex unlock failed!");
+
+ while (!aws_linked_list_empty(&list_cpy)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&list_cpy);
+ struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node);
+ if (task->timestamp) {
+ aws_task_scheduler_schedule_future(&scheduler->scheduler, task, task->timestamp);
+ } else {
+ aws_task_scheduler_schedule_now(&scheduler->scheduler, task);
+ }
+ }
+
+ /* now cancel the tasks. */
+ while (!aws_linked_list_empty(&cancel_list_cpy)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&cancel_list_cpy);
+ struct cancellation_node *cancellation_node = AWS_CONTAINER_OF(node, struct cancellation_node, node);
+ aws_task_scheduler_cancel_task(&scheduler->scheduler, cancellation_node->task_to_cancel);
+ aws_mem_release(scheduler->allocator, cancellation_node);
+ }
+
+ /* now run everything */
+ uint64_t current_time = 0;
+ aws_high_res_clock_get_ticks(&current_time);
+ aws_task_scheduler_run_all(&scheduler->scheduler, current_time);
+
+ uint64_t next_scheduled_task = 0;
+ aws_task_scheduler_has_tasks(&scheduler->scheduler, &next_scheduled_task);
+
+ int64_t timeout = 0;
+ if (next_scheduled_task == UINT64_MAX) {
+ /* at least wake up once per 30 seconds. */
+ timeout = (int64_t)30 * (int64_t)AWS_TIMESTAMP_NANOS;
+ } else {
+ timeout = (int64_t)(next_scheduled_task - current_time);
+ }
+
+ if (timeout > 0) {
+ AWS_FATAL_ASSERT(!aws_mutex_lock(&scheduler->thread_data.mutex) && "mutex lock failed!");
+
+ aws_condition_variable_wait_for_pred(
+ &scheduler->thread_data.c_var, &scheduler->thread_data.mutex, timeout, s_thread_should_wake, scheduler);
+ AWS_FATAL_ASSERT(!aws_mutex_unlock(&scheduler->thread_data.mutex) && "mutex unlock failed!");
+ }
+ }
+}
+
+struct aws_thread_scheduler *aws_thread_scheduler_new(
+ struct aws_allocator *allocator,
+ const struct aws_thread_options *thread_options) {
+ struct aws_thread_scheduler *scheduler = aws_mem_calloc(allocator, 1, sizeof(struct aws_thread_scheduler));
+
+ if (!scheduler) {
+ return NULL;
+ }
+
+ if (aws_thread_init(&scheduler->thread, allocator)) {
+ goto clean_up;
+ }
+
+ AWS_FATAL_ASSERT(!aws_mutex_init(&scheduler->thread_data.mutex) && "mutex init failed!");
+ AWS_FATAL_ASSERT(!aws_condition_variable_init(&scheduler->thread_data.c_var) && "condition variable init failed!");
+
+ if (aws_task_scheduler_init(&scheduler->scheduler, allocator)) {
+ goto thread_init;
+ }
+
+ scheduler->allocator = allocator;
+ aws_atomic_init_int(&scheduler->should_exit, 0u);
+ aws_ref_count_init(&scheduler->ref_count, scheduler, s_destroy_callback);
+ aws_linked_list_init(&scheduler->thread_data.scheduling_queue);
+ aws_linked_list_init(&scheduler->thread_data.cancel_queue);
+
+ if (aws_thread_launch(&scheduler->thread, s_thread_fn, scheduler, thread_options)) {
+ goto scheduler_init;
+ }
+
+ return scheduler;
+
+scheduler_init:
+ aws_task_scheduler_clean_up(&scheduler->scheduler);
+
+thread_init:
+ aws_condition_variable_clean_up(&scheduler->thread_data.c_var);
+ aws_mutex_clean_up(&scheduler->thread_data.mutex);
+ aws_thread_clean_up(&scheduler->thread);
+
+clean_up:
+ aws_mem_release(allocator, scheduler);
+
+ return NULL;
+}
+
+void aws_thread_scheduler_acquire(struct aws_thread_scheduler *scheduler) {
+ aws_ref_count_acquire(&scheduler->ref_count);
+}
+
+void aws_thread_scheduler_release(const struct aws_thread_scheduler *scheduler) {
+ aws_ref_count_release((struct aws_ref_count *)&scheduler->ref_count);
+}
+
+void aws_thread_scheduler_schedule_future(
+ struct aws_thread_scheduler *scheduler,
+ struct aws_task *task,
+ uint64_t time_to_run) {
+ task->timestamp = time_to_run;
+ AWS_FATAL_ASSERT(!aws_mutex_lock(&scheduler->thread_data.mutex) && "mutex lock failed!");
+ aws_linked_list_push_back(&scheduler->thread_data.scheduling_queue, &task->node);
+ AWS_FATAL_ASSERT(!aws_mutex_unlock(&scheduler->thread_data.mutex) && "mutex unlock failed!");
+ aws_condition_variable_notify_one(&scheduler->thread_data.c_var);
+}
+void aws_thread_scheduler_schedule_now(struct aws_thread_scheduler *scheduler, struct aws_task *task) {
+ aws_thread_scheduler_schedule_future(scheduler, task, 0u);
+}
+
+void aws_thread_scheduler_cancel_task(struct aws_thread_scheduler *scheduler, struct aws_task *task) {
+ struct cancellation_node *cancellation_node =
+ aws_mem_calloc(scheduler->allocator, 1, sizeof(struct cancellation_node));
+ AWS_FATAL_ASSERT(cancellation_node && "allocation failed for cancellation node!");
+ AWS_FATAL_ASSERT(!aws_mutex_lock(&scheduler->thread_data.mutex) && "mutex lock failed!");
+ struct aws_task *found_task = NULL;
+
+ /* remove tasks that are still in the scheduling queue, but haven't made it to the scheduler yet. */
+ struct aws_linked_list_node *node = aws_linked_list_empty(&scheduler->thread_data.scheduling_queue)
+ ? NULL
+ : aws_linked_list_front(&scheduler->thread_data.scheduling_queue);
+ while (node != NULL) {
+ struct aws_task *potential_task = AWS_CONTAINER_OF(node, struct aws_task, node);
+
+ if (potential_task == task) {
+ found_task = potential_task;
+ break;
+ }
+
+ if (aws_linked_list_node_next_is_valid(node)) {
+ node = aws_linked_list_next(node);
+ } else {
+ node = NULL;
+ }
+ }
+
+ if (found_task) {
+ aws_linked_list_remove(&found_task->node);
+ }
+
+ cancellation_node->task_to_cancel = task;
+
+ /* regardless put it in the cancel queue so the thread can call the task with canceled status. */
+ aws_linked_list_push_back(&scheduler->thread_data.cancel_queue, &cancellation_node->node);
+ AWS_FATAL_ASSERT(!aws_mutex_unlock(&scheduler->thread_data.mutex) && "mutex unlock failed!");
+ /* notify so the loop knows to wakeup and process the cancellations. */
+ aws_condition_variable_notify_one(&scheduler->thread_data.c_var);
+}
diff --git a/contrib/restricted/aws/aws-c-common/source/thread_shared.c b/contrib/restricted/aws/aws-c-common/source/thread_shared.c
new file mode 100644
index 0000000000..a0d19adfe0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/thread_shared.c
@@ -0,0 +1,167 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/private/thread_shared.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/condition_variable.h>
+#include <aws/common/linked_list.h>
+#include <aws/common/mutex.h>
+
+/*
+ * lock guarding the unjoined thread count and pending join list
+ */
+static struct aws_mutex s_managed_thread_lock = AWS_MUTEX_INIT;
+static struct aws_condition_variable s_managed_thread_signal = AWS_CONDITION_VARIABLE_INIT;
+static uint64_t s_default_managed_join_timeout_ns = 0;
+
+/*
+ * The number of successfully launched managed threads (or event loop threads which participate by inc/dec) that
+ * have not been joined yet.
+ */
+static uint32_t s_unjoined_thread_count = 0;
+
+/*
+ * A list of thread_wrapper structs for threads whose thread function has finished but join has not been called
+ * yet for the thread.
+ *
+ * This list is only ever at most length one.
+ */
+static struct aws_linked_list s_pending_join_managed_threads;
+
+void aws_thread_increment_unjoined_count(void) {
+ aws_mutex_lock(&s_managed_thread_lock);
+ ++s_unjoined_thread_count;
+ aws_mutex_unlock(&s_managed_thread_lock);
+}
+
+void aws_thread_decrement_unjoined_count(void) {
+ aws_mutex_lock(&s_managed_thread_lock);
+ --s_unjoined_thread_count;
+ aws_mutex_unlock(&s_managed_thread_lock);
+ aws_condition_variable_notify_one(&s_managed_thread_signal);
+}
+
+size_t aws_thread_get_managed_thread_count(void) {
+ size_t thread_count = 0;
+ aws_mutex_lock(&s_managed_thread_lock);
+ thread_count = s_unjoined_thread_count;
+ aws_mutex_unlock(&s_managed_thread_lock);
+
+ return thread_count;
+}
+
+static bool s_one_or_fewer_managed_threads_unjoined(void *context) {
+ (void)context;
+ return s_unjoined_thread_count <= 1;
+}
+
+void aws_thread_set_managed_join_timeout_ns(uint64_t timeout_in_ns) {
+ aws_mutex_lock(&s_managed_thread_lock);
+ s_default_managed_join_timeout_ns = timeout_in_ns;
+ aws_mutex_unlock(&s_managed_thread_lock);
+}
+
+int aws_thread_join_all_managed(void) {
+ struct aws_linked_list join_list;
+
+ aws_mutex_lock(&s_managed_thread_lock);
+ uint64_t timeout_in_ns = s_default_managed_join_timeout_ns;
+ aws_mutex_unlock(&s_managed_thread_lock);
+
+ uint64_t now_in_ns = 0;
+ uint64_t timeout_timestamp_ns = 0;
+ if (timeout_in_ns > 0) {
+ aws_sys_clock_get_ticks(&now_in_ns);
+ timeout_timestamp_ns = now_in_ns + timeout_in_ns;
+ }
+
+ bool successful = true;
+ bool done = false;
+ while (!done) {
+ aws_mutex_lock(&s_managed_thread_lock);
+
+ /*
+ * We lazily join old threads as newer ones finish their thread function. This means that when called from
+ * the main thread, there will always be one last thread (whichever completion serialized last) that is our
+ * responsibility to join (as long as at least one managed thread was created). So we wait for a count <= 1
+ * rather than what you'd normally expect (0).
+ *
+ * Absent a timeout, we only terminate if there are no threads left so it is possible to spin-wait a while
+ * if there is a single thread still running.
+ */
+ if (timeout_timestamp_ns > 0) {
+ uint64_t wait_ns = 0;
+
+ /*
+ * now_in_ns is always refreshed right before this either outside the loop before the first iteration or
+ * after the previous wait when the overall timeout was checked.
+ */
+ if (now_in_ns <= timeout_timestamp_ns) {
+ wait_ns = timeout_timestamp_ns - now_in_ns;
+ }
+
+ aws_condition_variable_wait_for_pred(
+ &s_managed_thread_signal,
+ &s_managed_thread_lock,
+ wait_ns,
+ s_one_or_fewer_managed_threads_unjoined,
+ NULL);
+ } else {
+ aws_condition_variable_wait_pred(
+ &s_managed_thread_signal, &s_managed_thread_lock, s_one_or_fewer_managed_threads_unjoined, NULL);
+ }
+
+ done = s_unjoined_thread_count == 0;
+
+ aws_sys_clock_get_ticks(&now_in_ns);
+ if (timeout_timestamp_ns != 0 && now_in_ns >= timeout_timestamp_ns) {
+ done = true;
+ successful = false;
+ }
+
+ aws_linked_list_init(&join_list);
+
+ aws_linked_list_swap_contents(&join_list, &s_pending_join_managed_threads);
+
+ aws_mutex_unlock(&s_managed_thread_lock);
+
+ /*
+ * Join against any finished threads. These threads are guaranteed to:
+ * (1) Not be the current thread
+ * (2) Have already ran to user thread_function completion
+ *
+ * The number of finished threads on any iteration is at most one.
+ */
+ aws_thread_join_and_free_wrapper_list(&join_list);
+ }
+
+ return successful ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
+
+void aws_thread_pending_join_add(struct aws_linked_list_node *node) {
+ struct aws_linked_list join_list;
+ aws_linked_list_init(&join_list);
+
+ aws_mutex_lock(&s_managed_thread_lock);
+ /*
+ * Swap out the pending join threads before adding this, otherwise we'd join against ourselves which won't work
+ */
+ aws_linked_list_swap_contents(&join_list, &s_pending_join_managed_threads);
+ aws_linked_list_push_back(&s_pending_join_managed_threads, node);
+ aws_mutex_unlock(&s_managed_thread_lock);
+
+ /*
+ * Join against any finished threads. This thread (it's only ever going to be at most one)
+ * is guaranteed to:
+ * (1) Not be the current thread
+ * (2) Has already ran to user thread_function completion
+ */
+ aws_thread_join_and_free_wrapper_list(&join_list);
+}
+
+void aws_thread_initialize_thread_management(void) {
+ aws_linked_list_init(&s_pending_join_managed_threads);
+}