author    thegeorg <thegeorg@yandex-team.ru>  2022-05-11 12:12:06 +0300
committer thegeorg <thegeorg@yandex-team.ru>  2022-05-11 12:12:06 +0300
commit    62f93da087b2fec0f89979fd11ac4d754ca36253 (patch)
tree      67bf8ceb55e2d079f3575f9a7373584ad407d2a5 /contrib/restricted
parent    8d55620139d4309265409767f873ba83fe046418 (diff)
download  ydb-62f93da087b2fec0f89979fd11ac4d754ca36253.tar.gz
Update aws-c-common and aws-c-io
* Update `contrib/restricted/aws/aws-c-io` to 0.11.0
* Backport cJSON symbol renaming logic from aws-sdk-cpp upstream

ref:396829235a01ed34888651ee38ebd76c95510d6b
Diffstat (limited to 'contrib/restricted')
-rw-r--r--  contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report | 27
-rw-r--r--  contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report | 33
-rw-r--r--  contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt | 24
-rw-r--r--  contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt | 11
-rw-r--r--  contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt | 11
-rw-r--r--  contrib/restricted/aws/aws-c-common/README.md | 4
-rw-r--r--  contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h | 3
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h | 42
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h | 8
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl | 26
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/assert.h | 36
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/bus.h | 97
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h | 63
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/byte_order.inl | 10
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/clock.h | 6
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/clock.inl | 80
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h | 49
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h | 1
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/error.h | 6
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h | 302
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/file.h | 198
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h | 5
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/json.h | 348
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/logging.h | 16
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/macros.h | 10
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h | 1
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/platform.h | 9
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h | 2
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/private/dlloads.h | 6
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl | 4
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/private/thread_shared.h | 39
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/promise.h | 95
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/ref_count.h | 40
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/resource_name.h | 43
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.h | 6
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.inl | 6
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h | 1
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/stdint.h | 13
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/string.h | 119
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h | 26
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/task_scheduler.h | 7
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/thread.h | 83
-rw-r--r--  contrib/restricted/aws/aws-c-common/include/aws/common/thread_scheduler.h | 60
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/allocator.c | 112
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/allocator_sba.c | 55
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c | 32
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/bus.c | 724
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/byte_buf.c | 104
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/command_line_parser.c | 57
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/common.c | 77
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/date_time.c | 34
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/error.c | 2
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/external/cJSON.c | 3113
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/file.c | 171
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/hash_table.c | 10
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/json.c | 344
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/log_writer.c | 7
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/logging.c | 74
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/memtrace.c | 2
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/posix/file.c | 279
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/posix/system_info.c | 145
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/posix/thread.c | 153
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/priority_queue.c | 2
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/process_common.c | 25
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/promise.c | 115
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/ref_count.c | 53
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/resource_name.c | 111
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/ring_buffer.c | 74
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/string.c | 179
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/task_scheduler.c | 5
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/thread_scheduler.c | 225
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/thread_shared.c | 167
-rw-r--r--  contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.copyrights.report | 34
-rw-r--r--  contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.licenses.report | 63
-rw-r--r--  contrib/restricted/aws/aws-c-io/.yandex_meta/licenses.list.txt | 43
-rw-r--r--  contrib/restricted/aws/aws-c-io/CMakeLists.darwin.txt | 27
-rw-r--r--  contrib/restricted/aws/aws-c-io/CMakeLists.linux.txt | 27
-rw-r--r--  contrib/restricted/aws/aws-c-io/CONTRIBUTING.md | 18
-rw-r--r--  contrib/restricted/aws/aws-c-io/PKCS11.md | 55
-rw-r--r--  contrib/restricted/aws/aws-c-io/README.md | 54
-rw-r--r--  contrib/restricted/aws/aws-c-io/THIRD-PARTY-LICENSES.txt | 31
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/channel.h | 15
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/channel_bootstrap.h | 6
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/event_loop.h | 120
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/file_utils.h | 64
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/host_resolver.h | 33
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/io.h | 112
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/logging.h | 2
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/pkcs11.h | 94
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/private/pem_utils.h | 25
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/private/pki_utils.h (renamed from contrib/restricted/aws/aws-c-io/include/aws/io/pki_utils.h) | 28
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/private/tls_channel_handler_shared.h | 11
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/retry_strategy.h | 57
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/stream.h | 47
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/tls_channel_handler.h | 225
-rw-r--r--  contrib/restricted/aws/aws-c-io/include/aws/io/uri.h | 3
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/bsd/kqueue_event_loop.c | 27
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/channel.c | 26
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/channel_bootstrap.c | 55
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/event_loop.c | 222
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/exponential_backoff_retry_strategy.c | 21
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/file_utils_shared.c | 68
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/host_resolver.c | 550
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/io.c | 137
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/linux/epoll_event_loop.c | 28
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/pem_utils.c | 98
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/pkcs11.c | 1371
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11.h | 265
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11f.h | 939
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11t.h | 2003
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/pkcs11_private.h | 167
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/pki_utils.c | 2
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/posix/file_utils.c | 69
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/posix/host_resolver.c | 5
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/posix/socket.c | 207
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/retry_strategy.c | 22
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/s2n/s2n_tls_channel_handler.c | 795
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/socket_channel_handler.c | 9
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/standard_retry_strategy.c | 497
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/stream.c | 194
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/tls_channel_handler.c | 494
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/tls_channel_handler_shared.c | 4
-rw-r--r--  contrib/restricted/aws/aws-c-io/source/uri.c | 41
123 files changed, 16369 insertions(+), 1568 deletions(-)
diff --git a/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report b/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report
index db3048dc1e..89570540b1 100644
--- a/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report
+++ b/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.copyrights.report
@@ -49,6 +49,7 @@ BELONGS ya.make
include/aws/common/atomics_gnu.inl [5:5]
include/aws/common/atomics_gnu_old.inl [4:4]
include/aws/common/atomics_msvc.inl [5:5]
+ include/aws/common/bus.h [5:5]
include/aws/common/byte_buf.h [4:4]
include/aws/common/byte_order.h [5:5]
include/aws/common/byte_order.inl [5:5]
@@ -68,7 +69,9 @@ BELONGS ya.make
include/aws/common/error.inl [5:5]
include/aws/common/exports.h [4:4]
include/aws/common/fifo_cache.h [4:4]
+ include/aws/common/file.h [4:4]
include/aws/common/hash_table.h [5:5]
+ include/aws/common/json.h [5:5]
include/aws/common/lifo_cache.h [4:4]
include/aws/common/linked_hash_table.h [4:4]
include/aws/common/linked_list.h [5:5]
@@ -98,10 +101,11 @@ BELONGS ya.make
include/aws/common/private/byte_buf.h [4:4]
include/aws/common/private/dlloads.h [4:4]
include/aws/common/private/hash_table_impl.h [5:5]
+ include/aws/common/private/thread_shared.h [4:4]
include/aws/common/private/xml_parser_impl.h [5:5]
include/aws/common/process.h [4:4]
+ include/aws/common/promise.h [2:2]
include/aws/common/ref_count.h [5:5]
- include/aws/common/resource_name.h [2:2]
include/aws/common/ring_buffer.h [4:4]
include/aws/common/ring_buffer.inl [4:4]
include/aws/common/rw_lock.h [5:5]
@@ -113,6 +117,7 @@ BELONGS ya.make
include/aws/common/system_info.h [5:5]
include/aws/common/task_scheduler.h [5:5]
include/aws/common/thread.h [5:5]
+ include/aws/common/thread_scheduler.h [4:4]
include/aws/common/time.h [4:4]
include/aws/common/uuid.h [5:5]
include/aws/common/xml_parser.h [5:5]
@@ -126,6 +131,7 @@ BELONGS ya.make
source/arch/intel/encoding_avx2.c [2:2]
source/array_list.c [2:2]
source/assert.c [2:2]
+ source/bus.c [2:2]
source/byte_buf.c [2:2]
source/cache.c [2:2]
source/codegen.c [2:2]
@@ -137,7 +143,9 @@ BELONGS ya.make
source/encoding.c [2:2]
source/error.c [2:2]
source/fifo_cache.c [2:2]
+ source/file.c [2:2]
source/hash_table.c [2:2]
+ source/json.c [2:2]
source/lifo_cache.c [2:2]
source/linked_hash_table.c [2:2]
source/log_channel.c [2:2]
@@ -151,6 +159,7 @@ BELONGS ya.make
source/posix/condition_variable.c [2:2]
source/posix/device_random.c [2:2]
source/posix/environment.c [2:2]
+ source/posix/file.c [2:2]
source/posix/mutex.c [2:2]
source/posix/process.c [2:2]
source/posix/rw_lock.c [2:2]
@@ -159,12 +168,14 @@ BELONGS ya.make
source/posix/time.c [2:2]
source/priority_queue.c [2:2]
source/process_common.c [2:2]
+ source/promise.c [2:2]
source/ref_count.c [2:2]
- source/resource_name.c [2:2]
source/ring_buffer.c [2:2]
source/statistics.c [2:2]
source/string.c [2:2]
source/task_scheduler.c [2:2]
+ source/thread_scheduler.c [2:2]
+ source/thread_shared.c [2:2]
source/uuid.c [2:2]
source/xml_parser.c [2:2]
@@ -178,3 +189,15 @@ BELONGS ya.make
Match type : COPYRIGHT
Files with this license:
source/posix/time.c [15:15]
+
+KEEP COPYRIGHT_SERVICE_LABEL bdcf211d81a69c0f282fb7543c1a24a7
+BELONGS ya.make
+ License text:
+ Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ include/aws/common/external/cJSON.h [2:2]
+ source/external/cJSON.c [2:2]
diff --git a/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report b/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report
index ecd816ac72..5c9d7d309e 100644
--- a/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report
+++ b/contrib/restricted/aws/aws-c-common/.yandex_meta/devtools.licenses.report
@@ -54,7 +54,7 @@ BELONGS ya.make
Match type : TEXT
Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain-disclaimer.LICENSE
Files with this license:
- include/aws/common/private/lookup3.inl [21:22]
+ include/aws/common/private/lookup3.inl [24:25]
KEEP Public-Domain 5b7627115f23e7c5f0d8e352a16d9353
BELONGS ya.make
@@ -66,7 +66,19 @@ BELONGS ya.make
Match type : NOTICE
Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
Files with this license:
- include/aws/common/private/lookup3.inl [6:6]
+ include/aws/common/private/lookup3.inl [9:9]
+
+KEEP MIT 5debb370f50e1dfd24ff5144233a2ef6
+BELONGS ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: MIT
+ Score : 100.00
+ Match type : TEXT
+ Links : http://opensource.org/licenses/mit-license.php, https://spdx.org/licenses/MIT
+ Files with this license:
+ include/aws/common/external/cJSON.h [4:20]
+ source/external/cJSON.c [4:20]
KEEP Apache-2.0 6c901454b872854c0dea3ec06b67701a
BELONGS ya.make
@@ -91,7 +103,7 @@ BELONGS ya.make
Match type : REFERENCE
Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE
Files with this license:
- include/aws/common/private/lookup3.inl [16:16]
+ include/aws/common/private/lookup3.inl [19:19]
KEEP Apache-2.0 7b04071babb9b8532292659e4abba7e3
BELONGS ya.make
@@ -115,6 +127,7 @@ BELONGS ya.make
Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
Files with this license:
source/arch/arm/asm/cpuid.c [4:13]
+ source/bus.c [4:13]
KEEP Apache-2.0 d591512e466bb957030b8857f753349e
BELONGS ya.make
@@ -137,6 +150,7 @@ BELONGS ya.make
include/aws/common/atomics_gnu.inl [6:6]
include/aws/common/atomics_gnu_old.inl [5:5]
include/aws/common/atomics_msvc.inl [6:6]
+ include/aws/common/bus.h [6:6]
include/aws/common/byte_buf.h [5:5]
include/aws/common/byte_order.h [6:6]
include/aws/common/byte_order.inl [6:6]
@@ -156,7 +170,9 @@ BELONGS ya.make
include/aws/common/error.inl [6:6]
include/aws/common/exports.h [5:5]
include/aws/common/fifo_cache.h [5:5]
+ include/aws/common/file.h [5:5]
include/aws/common/hash_table.h [6:6]
+ include/aws/common/json.h [6:6]
include/aws/common/lifo_cache.h [5:5]
include/aws/common/linked_hash_table.h [5:5]
include/aws/common/linked_list.h [6:6]
@@ -186,10 +202,11 @@ BELONGS ya.make
include/aws/common/private/byte_buf.h [5:5]
include/aws/common/private/dlloads.h [5:5]
include/aws/common/private/hash_table_impl.h [6:6]
+ include/aws/common/private/thread_shared.h [5:5]
include/aws/common/private/xml_parser_impl.h [6:6]
include/aws/common/process.h [5:5]
+ include/aws/common/promise.h [3:3]
include/aws/common/ref_count.h [6:6]
- include/aws/common/resource_name.h [3:3]
include/aws/common/ring_buffer.h [5:5]
include/aws/common/ring_buffer.inl [5:5]
include/aws/common/rw_lock.h [6:6]
@@ -201,6 +218,7 @@ BELONGS ya.make
include/aws/common/system_info.h [6:6]
include/aws/common/task_scheduler.h [6:6]
include/aws/common/thread.h [6:6]
+ include/aws/common/thread_scheduler.h [5:5]
include/aws/common/time.h [5:5]
include/aws/common/uuid.h [6:6]
include/aws/common/xml_parser.h [6:6]
@@ -224,7 +242,9 @@ BELONGS ya.make
source/encoding.c [3:3]
source/error.c [3:3]
source/fifo_cache.c [3:3]
+ source/file.c [3:3]
source/hash_table.c [3:3]
+ source/json.c [3:3]
source/lifo_cache.c [3:3]
source/linked_hash_table.c [3:3]
source/log_channel.c [3:3]
@@ -238,6 +258,7 @@ BELONGS ya.make
source/posix/condition_variable.c [3:3]
source/posix/device_random.c [3:3]
source/posix/environment.c [3:3]
+ source/posix/file.c [3:3]
source/posix/mutex.c [3:3]
source/posix/process.c [3:3]
source/posix/rw_lock.c [3:3]
@@ -246,12 +267,14 @@ BELONGS ya.make
source/posix/time.c [3:3]
source/priority_queue.c [3:3]
source/process_common.c [3:3]
+ source/promise.c [3:3]
source/ref_count.c [3:3]
- source/resource_name.c [3:3]
source/ring_buffer.c [3:3]
source/statistics.c [3:3]
source/string.c [3:3]
source/task_scheduler.c [3:3]
+ source/thread_scheduler.c [3:3]
+ source/thread_shared.c [3:3]
source/uuid.c [3:3]
source/xml_parser.c [3:3]
diff --git a/contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt b/contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt
index c74e7c0ea5..a950dafebe 100644
--- a/contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt
+++ b/contrib/restricted/aws/aws-c-common/.yandex_meta/licenses.list.txt
@@ -264,11 +264,35 @@ This library is licensed under the Apache 2.0 License.
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+====================COPYRIGHT====================
+Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+
====================File: NOTICE====================
AWS C Common
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+====================MIT====================
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
====================Public-Domain====================
* The following public domain code has been modified as follows:
diff --git a/contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt b/contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt
index 3728d92779..39146acbd8 100644
--- a/contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt
+++ b/contrib/restricted/aws/aws-c-common/CMakeLists.darwin.txt
@@ -10,10 +10,12 @@
add_library(restricted-aws-aws-c-common)
target_compile_options(restricted-aws-aws-c-common PRIVATE
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DCJSON_HIDE_SYMBOLS
-DHAVE_AVX2_INTRINSICS
-DHAVE_MM256_EXTRACT_EPI64
-DHAVE_SYSCONF
-DUSE_SIMD_ENCODING
+ -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE
)
target_include_directories(restricted-aws-aws-c-common PUBLIC
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/generated/include
@@ -33,6 +35,7 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/array_list.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/assert.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/bus.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/byte_buf.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/cache.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/codegen.c
@@ -43,8 +46,11 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/device_random.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/encoding.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/error.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/external/cJSON.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/fifo_cache.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/file.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/hash_table.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/json.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/lifo_cache.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/linked_hash_table.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/log_channel.c
@@ -58,6 +64,7 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/condition_variable.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/device_random.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/environment.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/file.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/mutex.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/process.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/rw_lock.c
@@ -66,12 +73,14 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/time.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/priority_queue.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/process_common.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/promise.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/ref_count.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/resource_name.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/ring_buffer.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/statistics.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/string.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/task_scheduler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/thread_shared.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/uuid.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/xml_parser.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/arch/intel/asm/cpuid.c
diff --git a/contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt b/contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt
index cbb6afb997..6de6ac5160 100644
--- a/contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt
+++ b/contrib/restricted/aws/aws-c-common/CMakeLists.linux.txt
@@ -10,10 +10,12 @@
add_library(restricted-aws-aws-c-common)
target_compile_options(restricted-aws-aws-c-common PRIVATE
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DCJSON_HIDE_SYMBOLS
-DHAVE_AVX2_INTRINSICS
-DHAVE_MM256_EXTRACT_EPI64
-DHAVE_SYSCONF
-DUSE_SIMD_ENCODING
+ -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD_ATTR
)
target_include_directories(restricted-aws-aws-c-common PUBLIC
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/generated/include
@@ -29,6 +31,7 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/array_list.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/assert.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/bus.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/byte_buf.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/cache.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/codegen.c
@@ -39,8 +42,11 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/device_random.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/encoding.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/error.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/external/cJSON.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/fifo_cache.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/file.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/hash_table.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/json.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/lifo_cache.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/linked_hash_table.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/log_channel.c
@@ -54,6 +60,7 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/condition_variable.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/device_random.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/environment.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/file.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/mutex.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/process.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/rw_lock.c
@@ -62,12 +69,14 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/posix/time.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/priority_queue.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/process_common.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/promise.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/ref_count.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/resource_name.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/ring_buffer.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/statistics.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/string.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/task_scheduler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/thread_shared.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/uuid.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/xml_parser.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/arch/intel/asm/cpuid.c
diff --git a/contrib/restricted/aws/aws-c-common/README.md b/contrib/restricted/aws/aws-c-common/README.md
index 054c918735..95f4191c6d 100644
--- a/contrib/restricted/aws/aws-c-common/README.md
+++ b/contrib/restricted/aws/aws-c-common/README.md
@@ -121,8 +121,8 @@ have pre-slotted log subjects & error codes for each library. The currently allo
| [0x2C00, 0x3000) | aws-crt-nodejs |
| [0x3000, 0x3400) | aws-crt-dotnet |
| [0x3400, 0x3800) | aws-c-iot |
-| [0x3800, 0x3C00) | (reserved for future project) |
-| [0x3C00, 0x4000) | (reserved for future project) |
+| [0x3800, 0x3C00) | aws-c-s3 |
+| [0x3C00, 0x4000) | aws-c-sdkutils |
| [0x4000, 0x4400) | (reserved for future project) |
| [0x4400, 0x4800) | (reserved for future project) |
diff --git a/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h b/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h
index decbdf88f0..f152531c17 100644
--- a/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h
+++ b/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h
@@ -15,6 +15,9 @@
#define AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS
#define AWS_HAVE_GCC_INLINE_ASM
/* #undef AWS_HAVE_MSVC_MULX */
+#define AWS_HAVE_POSIX_LARGE_FILE_SUPPORT
/* #undef AWS_HAVE_EXECINFO */
+/* #undef AWS_HAVE_WINAPI_DESKTOP */
+#define AWS_HAVE_LINUX_IF_LINK_H
#endif
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h b/contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h
index ba4d9d5c17..9d7f2bb500 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/allocator.h
@@ -52,14 +52,20 @@ void aws_wrapped_cf_allocator_destroy(CFAllocatorRef allocator);
#endif
/**
- * Returns at least `size` of memory ready for usage or returns NULL on failure.
+ * Returns at least `size` of memory ready for usage. In versions v0.6.8 and prior, this function was allowed to return
+ * NULL. In later versions, if allocator->mem_acquire() returns NULL, this function will assert and exit. To handle
+ * conditions where OOM is not a fatal error, allocator->mem_acquire() is responsible for finding/reclaiming/running a
+ * GC, etc., before returning.
*/
AWS_COMMON_API
void *aws_mem_acquire(struct aws_allocator *allocator, size_t size);
/**
* Allocates a block of memory for an array of num elements, each of them size bytes long, and initializes all its bits
- * to zero. Returns null on failure.
+ * to zero. In versions v0.6.8 and prior, this function was allowed to return NULL.
+ * In later versions, if allocator->mem_calloc() returns NULL, this function will assert and exit. To handle
+ * conditions where OOM is not a fatal error, allocator->mem_calloc() is responsible for finding/reclaiming/running a
+ * GC, etc., before returning.
*/
AWS_COMMON_API
void *aws_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size);
@@ -72,6 +78,11 @@ void *aws_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size);
* in the same contiguous block of memory.
*
* Returns a pointer to the allocation.
+ *
+ * In versions v0.6.8 and prior, this function was allowed to return
+ * NULL. In later versions, if allocator->mem_acquire() returns NULL, this function will assert and exit. To handle
+ * conditions where OOM is not a fatal error, allocator->mem_acquire() is responsible for finding/reclaiming/running a
+ * GC, etc., before returning.
*/
AWS_COMMON_API
void *aws_mem_acquire_many(struct aws_allocator *allocator, size_t count, ...);
@@ -83,13 +94,15 @@ void *aws_mem_acquire_many(struct aws_allocator *allocator, size_t count, ...);
AWS_COMMON_API
void aws_mem_release(struct aws_allocator *allocator, void *ptr);
-/*
+/**
* Attempts to adjust the size of the pointed-to memory buffer from oldsize to
* newsize. The pointer (*ptr) may be changed if the memory needs to be
* reallocated.
*
- * If reallocation fails, *ptr is unchanged, and this method raises an
- * AWS_ERROR_OOM error.
+ * In versions v0.6.8 and prior, this function was allowed to return
+ * NULL. In later versions, if allocator->mem_realloc() returns NULL, this function will assert and exit. To handle
+ * conditions where OOM is not a fatal error, allocator->mem_realloc() is responsible for finding/reclaiming/running a
+ * GC, etc., before returning.
*/
AWS_COMMON_API
int aws_mem_realloc(struct aws_allocator *allocator, void **ptr, size_t oldsize, size_t newsize);
@@ -166,6 +179,25 @@ struct aws_allocator *aws_small_block_allocator_new(struct aws_allocator *alloca
AWS_COMMON_API
void aws_small_block_allocator_destroy(struct aws_allocator *sba_allocator);
+/*
+ * Returns the number of bytes currently active in the SBA
+ */
+AWS_COMMON_API
+size_t aws_small_block_allocator_bytes_active(struct aws_allocator *sba_allocator);
+
+/*
+ * Returns the number of bytes reserved in pages/bins inside the SBA, e.g. the
+ * current system memory used by the SBA
+ */
+AWS_COMMON_API
+size_t aws_small_block_allocator_bytes_reserved(struct aws_allocator *sba_allocator);
+
+/*
+ * Returns the page size that the SBA is using
+ */
+AWS_COMMON_API
+size_t aws_small_block_allocator_page_size(struct aws_allocator *sba_allocator);
+
AWS_EXTERN_C_END
#endif /* AWS_COMMON_ALLOCATOR_H */
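
The three introspection functions above are new in this update. A minimal sketch of how they compose with the rest of the allocator API, assuming the upstream two-argument aws_small_block_allocator_new(allocator, multi_threaded) signature (its declaration is truncated in the hunk header above):

    #include <aws/common/allocator.h>

    #include <stdbool.h>
    #include <stdio.h>

    int main(void) {
        /* Wrap the default allocator in a small block allocator (SBA). */
        struct aws_allocator *sba = aws_small_block_allocator_new(aws_default_allocator(), true /* multi_threaded */);

        /* Under the new contract documented above, aws_mem_acquire() never
         * returns NULL: OOM asserts and exits instead of failing. */
        void *block = aws_mem_acquire(sba, 64);

        /* The new introspection hooks expose the SBA's memory footprint. */
        printf("active:   %zu bytes\n", aws_small_block_allocator_bytes_active(sba));
        printf("reserved: %zu bytes\n", aws_small_block_allocator_bytes_reserved(sba));
        printf("page:     %zu bytes\n", aws_small_block_allocator_page_size(sba));

        aws_mem_release(sba, block);
        aws_small_block_allocator_destroy(sba);
        return 0;
    }
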
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h
index 1eb7f773cf..bbd50ead32 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.h
@@ -88,6 +88,14 @@ AWS_STATIC_IMPL
int aws_array_list_front(const struct aws_array_list *AWS_RESTRICT list, void *val);
/**
+ * Pushes the memory pointed to by val onto the front of the internal list.
+ * This call results in shifting all of the elements in the list. Avoid this call unless that
+ * is intended behavior.
+ */
+AWS_STATIC_IMPL
+int aws_array_list_push_front(struct aws_array_list *AWS_RESTRICT list, const void *val);
+
+/**
* Deletes the element at the front of the list if it exists. If list is empty, AWS_ERROR_LIST_EMPTY will be raised.
* This call results in shifting all of the elements at the end of the array to the front. Avoid this call unless that
* is intended behavior.
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl
index d3ca30ecda..4e64a96a66 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl
@@ -162,6 +162,32 @@ int aws_array_list_front(const struct aws_array_list *AWS_RESTRICT list, void *v
}
AWS_STATIC_IMPL
+int aws_array_list_push_front(struct aws_array_list *AWS_RESTRICT list, const void *val) {
+ AWS_PRECONDITION(aws_array_list_is_valid(list));
+ AWS_PRECONDITION(
+ val && AWS_MEM_IS_READABLE(val, list->item_size),
+ "Input pointer [val] must point writable memory of [list->item_size] bytes.");
+ size_t orig_len = aws_array_list_length(list);
+ int err_code = aws_array_list_ensure_capacity(list, orig_len);
+
+ if (err_code && aws_last_error() == AWS_ERROR_INVALID_INDEX && !list->alloc) {
+ AWS_POSTCONDITION(aws_array_list_is_valid(list));
+ return aws_raise_error(AWS_ERROR_LIST_EXCEEDS_MAX_SIZE);
+ } else if (err_code) {
+ AWS_POSTCONDITION(aws_array_list_is_valid(list));
+ return err_code;
+ }
+ if (orig_len) {
+ memmove((uint8_t *)list->data + list->item_size, list->data, orig_len * list->item_size);
+ }
+ ++list->length;
+ memcpy(list->data, val, list->item_size);
+
+ AWS_POSTCONDITION(aws_array_list_is_valid(list));
+ return err_code;
+}
+
+AWS_STATIC_IMPL
int aws_array_list_pop_front(struct aws_array_list *AWS_RESTRICT list) {
AWS_PRECONDITION(aws_array_list_is_valid(list));
if (aws_array_list_length(list) > 0) {
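
A short sketch of the new push_front semantics next to the existing list API; as the doc comment warns, the push shifts every existing element one slot to the right:

    #include <aws/common/array_list.h>

    #include <assert.h>

    int main(void) {
        struct aws_array_list list;
        int front = 1;
        int back = 2;

        /* Dynamic list of ints, with initial capacity for 4 items. */
        aws_array_list_init_dynamic(&list, aws_default_allocator(), 4, sizeof(int));
        aws_array_list_push_back(&list, &back);

        /* O(n): memmoves the existing contents right, then copies in the new head. */
        aws_array_list_push_front(&list, &front);

        int value = 0;
        aws_array_list_front(&list, &value);
        assert(value == 1);

        aws_array_list_clean_up(&list);
        return 0;
    }
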
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/assert.h b/contrib/restricted/aws/aws-c-common/include/aws/common/assert.h
index 7ab9973ef2..e7ce341ce0 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/assert.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/assert.h
@@ -19,6 +19,24 @@ void aws_fatal_assert(const char *cond_str, const char *file, int line) AWS_ATTR
AWS_EXTERN_C_END
#if defined(CBMC)
+# define AWS_PANIC_OOM(mem, msg) \
+ do { \
+ if (!(mem)) { \
+ fprintf(stderr, "%s: %s, line %d", msg, __FILE__, __LINE__); \
+ exit(-1); \
+ } \
+ } while (0)
+#else
+# define AWS_PANIC_OOM(mem, msg) \
+ do { \
+ if (!(mem)) { \
+ fprintf(stderr, "%s", msg); \
+ abort(); \
+ } \
+ } while (0)
+#endif /* defined(CBMC) */
+
+#if defined(CBMC)
# define AWS_ASSUME(cond) __CPROVER_assume(cond)
#elif defined(_MSC_VER)
# define AWS_ASSUME(cond) __assume(cond)
@@ -86,8 +104,8 @@ AWS_EXTERN_C_END
# define AWS_POSTCONDITION1(cond) __CPROVER_assert((cond), # cond " check failed")
# define AWS_FATAL_POSTCONDITION2(cond, explanation) __CPROVER_assert((cond), (explanation))
# define AWS_FATAL_POSTCONDITION1(cond) __CPROVER_assert((cond), # cond " check failed")
-# define AWS_MEM_IS_READABLE(base, len) (((len) == 0) || (__CPROVER_r_ok((base), (len))))
-# define AWS_MEM_IS_WRITABLE(base, len) (((len) == 0) || (__CPROVER_r_ok((base), (len))))
+# define AWS_MEM_IS_READABLE_CHECK(base, len) (((len) == 0) || (__CPROVER_r_ok((base), (len))))
+# define AWS_MEM_IS_WRITABLE_CHECK(base, len) (((len) == 0) || (__CPROVER_r_ok((base), (len))))
#else
# define AWS_PRECONDITION2(cond, expl) AWS_ASSERT(cond)
# define AWS_PRECONDITION1(cond) AWS_ASSERT(cond)
@@ -98,12 +116,22 @@ AWS_EXTERN_C_END
# define AWS_FATAL_POSTCONDITION2(cond, expl) AWS_FATAL_ASSERT(cond)
# define AWS_FATAL_POSTCONDITION1(cond) AWS_FATAL_ASSERT(cond)
/**
+ * These macros should not be used in is_valid functions.
+ * All validate functions are also used in assumptions for CBMC proofs,
+ * which should not contain __CPROVER_*_ok primitives. The use of these primitives
+ * in assumptions may lead to spurious results.
* The C runtime does not give a way to check these properties,
* but we can at least check that the pointer is valid. */
-# define AWS_MEM_IS_READABLE(base, len) (((len) == 0) || (base))
-# define AWS_MEM_IS_WRITABLE(base, len) (((len) == 0) || (base))
+# define AWS_MEM_IS_READABLE_CHECK(base, len) (((len) == 0) || (base))
+# define AWS_MEM_IS_WRITABLE_CHECK(base, len) (((len) == 0) || (base))
#endif /* CBMC */
+/**
+ * These macros can safely be used in validate functions.
+ */
+#define AWS_MEM_IS_READABLE(base, len) (((len) == 0) || (base))
+#define AWS_MEM_IS_WRITABLE(base, len) (((len) == 0) || (base))
+
/* Logical consequence. */
#define AWS_IMPLIES(a, b) (!(a) || (b))
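
A minimal sketch of the intended AWS_PANIC_OOM usage; scratch_alloc is a hypothetical helper, not part of the library:

    #include <aws/common/assert.h>

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical helper: allocate a scratch buffer, treating OOM as fatal. */
    static void *scratch_alloc(size_t len) {
        void *mem = malloc(len);
        /* Prints the message and abort()s (exit()s under CBMC) when mem is NULL. */
        AWS_PANIC_OOM(mem, "scratch_alloc: allocation failed");
        return mem;
    }
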
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/bus.h b/contrib/restricted/aws/aws-c-common/include/aws/common/bus.h
new file mode 100644
index 0000000000..fe5127e6f7
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/bus.h
@@ -0,0 +1,97 @@
+#ifndef AWS_COMMON_BUS_H
+#define AWS_COMMON_BUS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/common.h>
+
+/*
+ * A message bus is a mapping of integer message addresses/types -> listeners/callbacks.
+ * A listener can listen to a single message, or to all messages on a bus
+ * Message addresses/types can be any 64-bit integer, starting at 1.
+ * AWS_BUS_ADDRESS_ALL (0xffffffffffffffff) is reserved for broadcast to all listeners.
+ * AWS_BUS_ADDRESS_CLOSE (0) is reserved for notifying listeners to clean up
+ * Listeners will be sent a message of type AWS_BUS_ADDRESS_CLOSE when it is time to clean any state up.
+ * Listeners are owned by the subscriber, and are no longer referenced by the bus once unsubscribed.
+ * Under the AWS_BUS_ASYNC policy, message delivery happens in a separate thread from sending, so listeners are
+ * responsible for their own thread safety.
+ */
+struct aws_bus;
+
+enum aws_bus_policy {
+ /**
+ * Messages will be delivered, even if dynamic allocation is required. Default.
+ */
+ AWS_BUS_ASYNC_RELIABLE = 0x0,
+ /**
+ * Only memory from the bus's internal buffer will be used (if a buffer size is supplied at bus creation time).
+ * If the buffer is full, older buffered messages will be discarded to make room for newer messages.
+ */
+ AWS_BUS_ASYNC_UNRELIABLE = 0x1,
+ /**
+ * Message delivery is immediate, and therefore reliable by definition
+ */
+ AWS_BUS_SYNC_RELIABLE = 0x2,
+};
+
+/**
+ * Subscribing to AWS_BUS_ADDRESS_ALL will cause the listener to be invoked for every message sent to the bus
+ * It is possible to send to AWS_BUS_ADDRESS_ALL, just be aware that this will only send to listeners subscribed
+ * to AWS_BUS_ADDRESS_ALL.
+ */
+#define AWS_BUS_ADDRESS_ALL ((uint64_t)-1)
+#define AWS_BUS_ADDRESS_CLOSE 0
+
+struct aws_bus_options {
+ enum aws_bus_policy policy;
+ /**
+ * Size of buffer for unreliable message delivery queue.
+ * Unused if policy is AWS_BUS_ASYNC_RELIABLE or AWS_BUS_SYNC_RELIABLE
+ * Messages are 40 bytes. Default buffer_size is 4K. The bus will not allocate memory beyond this size.
+ */
+ size_t buffer_size;
+ /* Not supported yet, but event loop group for delivery */
+ struct aws_event_loop_group *event_loop_group;
+};
+
+/* Signature for listener callbacks */
+typedef void(aws_bus_listener_fn)(uint64_t address, const void *payload, void *user_data);
+
+/**
+ * Allocates and initializes a message bus
+ */
+AWS_COMMON_API
+struct aws_bus *aws_bus_new(struct aws_allocator *allocator, const struct aws_bus_options *options);
+
+/**
+ * Cleans up a message bus, including notifying all remaining listeners to close
+ */
+AWS_COMMON_API
+void aws_bus_destroy(struct aws_bus *bus);
+
+/**
+ * Subscribes a listener to a message type. user_data's lifetime is the responsibility of the subscriber.
+ */
+AWS_COMMON_API
+int aws_bus_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data);
+
+/**
+ * Unsubscribe a listener from a specific message. This is only necessary if the listener has lifetime concerns.
+ * Otherwise, the listener will be called with an address of AWS_BUS_ADDRESS_CLOSE, which indicates that user_data
+ * can be cleaned up if necessary and the listener will never be called again.
+ */
+AWS_COMMON_API
+void aws_bus_unsubscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data);
+
+/**
+ * Sends a message to any listeners. payload will live until delivered, and then the destructor (if
+ * provided) will be called. Note that anything payload references must also live at least until it is destroyed.
+ * Will return AWS_OP_ERR if the bus is closing/has been closed
+ */
+AWS_COMMON_API
+int aws_bus_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *));
+
+#endif /* AWS_COMMON_BUS_H */
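
A minimal usage sketch for the new bus API under the synchronous policy, so delivery happens on the sending thread; the aws_common_library_init()/aws_common_library_clean_up() bracketing is assumed from common.h:

    #include <aws/common/bus.h>

    #include <stdio.h>

    static void s_on_message(uint64_t address, const void *payload, void *user_data) {
        (void)user_data;
        if (address == AWS_BUS_ADDRESS_CLOSE) {
            return; /* bus is shutting down; release listener state here if needed */
        }
        printf("address %llu: %s\n", (unsigned long long)address, (const char *)payload);
    }

    int main(void) {
        aws_common_library_init(aws_default_allocator());

        struct aws_bus_options options = {
            .policy = AWS_BUS_SYNC_RELIABLE, /* deliver immediately, on the sender's thread */
        };
        struct aws_bus *bus = aws_bus_new(aws_default_allocator(), &options);

        aws_bus_subscribe(bus, 42, s_on_message, NULL);
        /* Static payload and synchronous delivery, so no destructor is needed. */
        aws_bus_send(bus, 42, "hello", NULL);
        aws_bus_unsubscribe(bus, 42, s_on_message, NULL);

        aws_bus_destroy(bus);
        aws_common_library_clean_up();
        return 0;
    }
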
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h b/contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h
index 8e79a93b27..e6464b4780 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/byte_buf.h
@@ -125,6 +125,15 @@ AWS_COMMON_API int aws_byte_buf_init_copy(
const struct aws_byte_buf *src);
/**
+ * Reads 'filename' into 'out_buf'. If successful, 'out_buf' is allocated and filled with the data;
+ * it is your responsibility to call 'aws_byte_buf_clean_up()' on it. Otherwise, 'out_buf' remains
+ * unused. In the very unfortunate case where some API needs to treat out_buf as a c_string, a null terminator
+ * is appended, but is not included as part of the length field.
+ */
+AWS_COMMON_API
+int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename);
+
+/**
* Evaluates the set of properties that define the shape of all valid aws_byte_buf structures.
* It is also a cheap check, in the sense it run in constant time (i.e., no loops or recursion).
*/
@@ -496,6 +505,20 @@ AWS_COMMON_API
bool aws_byte_cursor_eq_c_str_ignore_case(const struct aws_byte_cursor *const cursor, const char *const c_str);
/**
+ * Return true if the input starts with the prefix (exact byte comparison).
+ */
+AWS_COMMON_API
+bool aws_byte_cursor_starts_with(const struct aws_byte_cursor *input, const struct aws_byte_cursor *prefix);
+
+/**
+ * Return true if the input starts with the prefix (case-insensitive).
+ * The "C" locale is used for comparing upper and lowercase letters.
+ * Data is assumed to be ASCII text, UTF-8 will work fine too.
+ */
+AWS_COMMON_API
+bool aws_byte_cursor_starts_with_ignore_case(const struct aws_byte_cursor *input, const struct aws_byte_cursor *prefix);
+
+/**
* Case-insensitive hash function for array containing ASCII or UTF-8 text.
*/
AWS_COMMON_API
@@ -873,6 +896,46 @@ AWS_COMMON_API bool aws_isxdigit(uint8_t ch);
*/
AWS_COMMON_API bool aws_isspace(uint8_t ch);
+/**
+ * Read entire cursor as ASCII/UTF-8 unsigned base-10 number.
+ * Stricter than strtoull(), which allows whitespace and inputs that start with "0x"
+ *
+ * Examples:
+ * "0" -> 0
+ * "123" -> 123
+ * "00004" -> 4 // leading zeros ok
+ *
+ * Rejects things like:
+ * "-1" // negative numbers not allowed
+ * "1,000" // only characters 0-9 allowed
+ * "" // blank string not allowed
+ * " 0 " // whitespace not allowed
+ * "0x0" // hex not allowed
+ * "FF" // hex not allowed
+ * "999999999999999999999999999999999999999999" // larger than max u64
+ */
+AWS_COMMON_API
+int aws_byte_cursor_utf8_parse_u64(struct aws_byte_cursor cursor, uint64_t *dst);
+
+/**
+ * Read entire cursor as ASCII/UTF-8 unsigned base-16 number with NO "0x" prefix.
+ *
+ * Examples:
+ * "F" -> 15
+ * "000000ff" -> 255 // leading zeros ok
+ * "Ff" -> 255 // mixed case ok
+ * "123" -> 291
+ * "FFFFFFFFFFFFFFFF" -> 18446744073709551616 // max u64
+ *
+ * Rejects things like:
+ * "0x0" // 0x prefix not allowed
+ * "" // blank string not allowed
+ * " F " // whitespace not allowed
+ * "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" // larger than max u64
+ */
+AWS_COMMON_API
+int aws_byte_cursor_utf8_parse_u64_hex(struct aws_byte_cursor cursor, uint64_t *dst);
+
AWS_EXTERN_C_END
#endif /* AWS_COMMON_BYTE_BUF_H */
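
A short sketch of the two new strict parsers, using aws_byte_cursor_from_c_str() from this same header:

    #include <aws/common/byte_buf.h>

    #include <assert.h>

    int main(void) {
        uint64_t value = 0;

        /* Base-10: leading zeros are fine; sign, whitespace and "0x" are rejected. */
        struct aws_byte_cursor dec = aws_byte_cursor_from_c_str("00123");
        assert(aws_byte_cursor_utf8_parse_u64(dec, &value) == AWS_OP_SUCCESS);
        assert(value == 123);

        /* Base-16: mixed case accepted, no "0x" prefix allowed. */
        struct aws_byte_cursor hex = aws_byte_cursor_from_c_str("Ff");
        assert(aws_byte_cursor_utf8_parse_u64_hex(hex, &value) == AWS_OP_SUCCESS);
        assert(value == 255);

        return 0;
    }
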
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/byte_order.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/byte_order.inl
index 2ba777909c..1204be06a1 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/byte_order.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/byte_order.inl
@@ -9,7 +9,7 @@
#include <aws/common/byte_order.h>
#include <aws/common/common.h>
-#ifdef _MSC_VER
+#ifdef _WIN32
# include <stdlib.h>
#else
# include <netinet/in.h>
@@ -59,7 +59,7 @@ AWS_STATIC_IMPL uint64_t aws_ntoh64(uint64_t x) {
* Convert 32 bit integer from host to network byte order.
*/
AWS_STATIC_IMPL uint32_t aws_hton32(uint32_t x) {
-#ifdef _MSC_VER
+#ifdef _WIN32
return aws_is_big_endian() ? x : _byteswap_ulong(x);
#else
return htonl(x);
@@ -116,7 +116,7 @@ AWS_STATIC_IMPL double aws_htonf64(double x) {
* Convert 32 bit integer from network to host byte order.
*/
AWS_STATIC_IMPL uint32_t aws_ntoh32(uint32_t x) {
-#ifdef _MSC_VER
+#ifdef _WIN32
return aws_is_big_endian() ? x : _byteswap_ulong(x);
#else
return ntohl(x);
@@ -141,7 +141,7 @@ AWS_STATIC_IMPL double aws_ntohf64(double x) {
* Convert 16 bit integer from host to network byte order.
*/
AWS_STATIC_IMPL uint16_t aws_hton16(uint16_t x) {
-#ifdef _MSC_VER
+#ifdef _WIN32
return aws_is_big_endian() ? x : _byteswap_ushort(x);
#else
return htons(x);
@@ -152,7 +152,7 @@ AWS_STATIC_IMPL uint16_t aws_hton16(uint16_t x) {
* Convert 16 bit integer from network to host byte order.
*/
AWS_STATIC_IMPL uint16_t aws_ntoh16(uint16_t x) {
-#ifdef _MSC_VER
+#ifdef _WIN32
return aws_is_big_endian() ? x : _byteswap_ushort(x);
#else
return ntohs(x);
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/clock.h b/contrib/restricted/aws/aws-c-common/include/aws/common/clock.h
index 489a5f19a1..68b202f1bd 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/clock.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/clock.h
@@ -32,6 +32,12 @@ AWS_STATIC_IMPL uint64_t aws_timestamp_convert(
uint64_t *remainder);
/**
+ * More general form of aws_timestamp_convert that takes arbitrary frequencies rather than the timestamp enum.
+ */
+AWS_STATIC_IMPL uint64_t
+ aws_timestamp_convert_u64(uint64_t ticks, uint64_t old_frequency, uint64_t new_frequency, uint64_t *remainder);
+
+/**
* Get ticks in nanoseconds (usually 100 nanosecond precision) on the high resolution clock (most-likely TSC). This
* clock has no bearing on the actual system time. On success, timestamp will be set.
*/
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/clock.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/clock.inl
index d741a43419..d0a1b8c253 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/clock.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/clock.inl
@@ -15,31 +15,75 @@ AWS_EXTERN_C_BEGIN
/**
* Converts 'timestamp' from unit 'convert_from' to unit 'convert_to', if the units are the same then 'timestamp' is
* returned. If 'remainder' is NOT NULL, it will be set to the remainder if convert_from is a more precise unit than
- * convert_to. To avoid unnecessary branching, 'remainder' is not zero initialized in this function, be sure to set it
- * to 0 first if you care about that kind of thing. If conversion would lead to integer overflow, the timestamp
- * returned will be the highest possible time that is representable, i.e. UINT64_MAX.
+ * convert_to (but only if the old frequency is a multiple of the new one). If conversion would lead to integer
+ * overflow, the timestamp returned will be the highest possible time that is representable, i.e. UINT64_MAX.
*/
+AWS_STATIC_IMPL uint64_t
+ aws_timestamp_convert_u64(uint64_t ticks, uint64_t old_frequency, uint64_t new_frequency, uint64_t *remainder) {
+
+ AWS_FATAL_ASSERT(old_frequency > 0 && new_frequency > 0);
+
+ /*
+ * The remainder, as defined in the contract of the original version of this function, only makes mathematical
+ * sense when the old frequency is a positive multiple of the new frequency. The new convert function needs to be
+ * backwards compatible with the old version's remainder while being a lot more accurate with its conversions
+ * in order to handle extreme edge cases of large numbers.
+ */
+ if (remainder != NULL) {
+ *remainder = 0;
+ /* only calculate remainder when going from a higher to lower frequency */
+ if (new_frequency < old_frequency) {
+ uint64_t frequency_remainder = old_frequency % new_frequency;
+ /* only calculate remainder when the old frequency is evenly divisible by the new one */
+ if (frequency_remainder == 0) {
+ uint64_t frequency_ratio = old_frequency / new_frequency;
+ *remainder = ticks % frequency_ratio;
+ }
+ }
+ }
+
+ /*
+ * Now do the actual conversion.
+ */
+ uint64_t old_seconds_elapsed = ticks / old_frequency;
+ uint64_t old_remainder = ticks - old_seconds_elapsed * old_frequency;
+
+ uint64_t new_ticks_whole_part = aws_mul_u64_saturating(old_seconds_elapsed, new_frequency);
+
+ /*
+ * This could be done in one of three ways:
+ *
+ * (1) (old_remainder / old_frequency) * new_frequency - this would be completely wrong, since
+ * old_remainder < old_frequency means integer division gives (old_remainder / old_frequency) == 0
+ *
+ * (2) old_remainder * (new_frequency / old_frequency) - this only gives a good solution when new_frequency is
+ * a multiple of old_frequency
+ *
+ * (3) (old_remainder * new_frequency) / old_frequency - this is how we do it below, the primary concern is if
+ * the initial multiplication can overflow. For that to be the case, we would need to be using old and new
+ * frequencies in the billions. This does not appear to be the case in any current machine's hardware counters.
+ *
+ * Ignoring arbitrary frequencies, even a nanosecond to nanosecond conversion would not overflow either.
+ *
+ * If this did become an issue, we would potentially need to use intrinsics/platform support for 128 bit math.
+ *
+ * For review consideration:
+ * (1) should we special case frequencies being a multiple of the other?
+ * (2) should we special case frequencies being the same? A ns-to-ns conversion does the full math and
+ * approaches overflow (but cannot actually do so).
+ */
+ uint64_t new_ticks_remainder_part = aws_mul_u64_saturating(old_remainder, new_frequency) / old_frequency;
+
+ return aws_add_u64_saturating(new_ticks_whole_part, new_ticks_remainder_part);
+}
+
AWS_STATIC_IMPL uint64_t aws_timestamp_convert(
uint64_t timestamp,
enum aws_timestamp_unit convert_from,
enum aws_timestamp_unit convert_to,
uint64_t *remainder) {
- uint64_t diff = 0;
-
- if (convert_to > convert_from) {
- diff = convert_to / convert_from;
- return aws_mul_u64_saturating(timestamp, diff);
- } else if (convert_to < convert_from) {
- diff = convert_from / convert_to;
- if (remainder) {
- *remainder = timestamp % diff;
- }
-
- return timestamp / diff;
- } else {
- return timestamp;
- }
+ return aws_timestamp_convert_u64(timestamp, convert_from, convert_to, remainder);
}
AWS_EXTERN_C_END
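
A worked sketch of the new conversion math: with a 3 MHz source and a 1 kHz target, 7,500,000 ticks split into 2 whole seconds (2,000 ms in the target unit) plus 1,500,000 leftover ticks (another 500 ms); the enum-based wrapper keeps its old remainder contract:

    #include <aws/common/clock.h>

    #include <assert.h>

    int main(void) {
        uint64_t remainder = 0;

        /* Arbitrary frequencies: 7,500,000 ticks at 3 MHz -> 2,500 ms at 1 kHz. */
        uint64_t ms = aws_timestamp_convert_u64(7500000, 3000000, 1000, &remainder);
        assert(ms == 2500 && remainder == 0); /* 3 MHz divides evenly by 1 kHz, and 7.5M % 3000 == 0 */

        /* Enum form: 1,500 ms -> 1 s, remainder 500 (the truncated sub-second part). */
        uint64_t secs = aws_timestamp_convert(1500, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_SECS, &remainder);
        assert(secs == 1 && remainder == 500);

        return 0;
    }
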
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h b/contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h
index 8b31ae98ef..7184dcd68a 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h
@@ -12,6 +12,21 @@ enum aws_cli_options_has_arg {
AWS_CLI_OPTIONS_OPTIONAL_ARGUMENT = 2,
};
+/**
+ * Invoked when a subcommand is encountered. argc and argv[] begin at the encountered command.
+ * command_name is the name of the command being handled.
+ */
+typedef int(aws_cli_options_subcommand_fn)(int argc, char *const argv[], const char *command_name, void *user_data);
+
+/**
+ * Dispatch table to dispatch cli commands from.
+ * command_name should be the exact string for the command you want to handle from the command line.
+ */
+struct aws_cli_subcommand_dispatch {
+ aws_cli_options_subcommand_fn *subcommand_fn;
+ const char *command_name;
+};
+
/* Ignoring padding since we're trying to maintain getopt.h compatibility */
/* NOLINTNEXTLINE(clang-analyzer-optin.performance.Padding) */
struct aws_cli_option {
@@ -34,6 +49,11 @@ AWS_COMMON_API extern int aws_cli_optind;
AWS_COMMON_API extern const char *aws_cli_optarg;
/**
+ * If 0x02 was returned by aws_cli_getopt_long(), this value will be set to the argument encountered.
+ */
+AWS_COMMON_API extern const char *aws_cli_positional_arg;
+
+/**
* A mostly compliant implementation of posix getopt_long(). Parses command-line arguments. argc is the number of
* command line arguments passed in argv. optstring contains the legitimate option characters. The option characters
* correspond to aws_cli_option::val. If the character is followed by a :, the option requires an argument. If it is
@@ -45,7 +65,8 @@ AWS_COMMON_API extern const char *aws_cli_optarg;
* If longindex is non-null, it will be set to the index in longopts, for the found option.
*
* Returns option val if it was found, '?' if an option was encountered that was not specified in the option string,
- * returns -1 when all arguments that can be parsed have been parsed.
+ * 0x02 (START_OF_TEXT) will be returned if a positional argument was encountered. Returns -1 when all arguments that
+ * can be parsed have been parsed.
*/
AWS_COMMON_API int aws_cli_getopt_long(
int argc,
@@ -53,6 +74,32 @@ AWS_COMMON_API int aws_cli_getopt_long(
const char *optstring,
const struct aws_cli_option *longopts,
int *longindex);
+
+/**
+ * Resets global parser state for use in another parser run for the application.
+ */
+AWS_COMMON_API void aws_cli_reset_state(void);
+
+/**
+ * Dispatches the current command line arguments with a subcommand from the second input argument in argv[], if
+ * dispatch table contains a command that matches the argument. When the command is dispatched, argc and argv will be
+ * updated to reflect the new argument count. The cli options are required to come after the subcommand. If either no
+ * dispatch was found or there was no argument passed to the program, this function will return AWS_OP_ERR. Check
+ * aws_last_error() for details on the error.
+ * @param argc number of arguments passed to int main()
+ * @param argv the arguments passed to int main()
+ * @param parse_cb, optional, specify NULL if you don't want to handle this. This argument is for parsing "meta"
+ * commands from the command line options prior to dispatch occurring.
+ * @param dispatch_table table containing functions and command name to dispatch on.
+ * @param table_length number of entries in dispatch_table.
+ * @return AWS_OP_SUCCESS(0) on success, AWS_OP_ERR(-1) on failure
+ */
+AWS_COMMON_API int aws_cli_dispatch_on_subcommand(
+ int argc,
+ char *const argv[],
+ struct aws_cli_subcommand_dispatch *dispatch_table,
+ int table_length,
+ void *user_data);
AWS_EXTERN_C_END
#endif /* AWS_COMMON_COMMAND_LINE_PARSER_H */
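
A minimal sketch of how the new subcommand dispatch might be wired up (the handler name and the "upload" command string are illustrative, not part of the header):

#include <aws/common/command_line_parser.h>
#include <stdio.h>

static int s_handle_upload(int argc, char *const argv[], const char *command_name, void *user_data) {
    (void)argv;
    (void)user_data;
    printf("dispatched '%s' with %d args\n", command_name, argc);
    return AWS_OP_SUCCESS;
}

int main(int argc, char *const argv[]) {
    struct aws_cli_subcommand_dispatch table[] = {
        {.subcommand_fn = s_handle_upload, .command_name = "upload"},
    };
    /* e.g. `app upload --file foo`: argc/argv passed to the handler are rebased
     * at "upload", and options must come after the subcommand. */
    return aws_cli_dispatch_on_subcommand(argc, argv, table, 1, NULL);
}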
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h b/contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h
index 5522c4fae5..40d83f864f 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/date_time.h
@@ -48,6 +48,7 @@ enum aws_date_day_of_week {
struct aws_date_time {
time_t timestamp;
+ uint16_t milliseconds;
char tz[6];
struct tm gmt_time;
struct tm local_time;
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/error.h b/contrib/restricted/aws/aws-c-common/include/aws/common/error.h
index 200de33146..f12fc730b9 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/error.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/error.h
@@ -143,6 +143,7 @@ AWS_EXTERN_C_END
enum aws_common_error {
AWS_ERROR_SUCCESS = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_COMMON_PACKAGE_ID),
AWS_ERROR_OOM,
+ AWS_ERROR_NO_SPACE,
AWS_ERROR_UNKNOWN,
AWS_ERROR_SHORT_BUFFER,
AWS_ERROR_OVERFLOW_DETECTED,
@@ -190,7 +191,10 @@ enum aws_common_error {
AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED,
AWS_ERROR_STRING_MATCH_NOT_FOUND,
AWS_ERROR_DIVIDE_BY_ZERO,
-
+ AWS_ERROR_INVALID_FILE_HANDLE,
+ AWS_ERROR_OPERATION_INTERUPTED,
+ AWS_ERROR_DIRECTORY_NOT_EMPTY,
+ AWS_ERROR_PLATFORM_NOT_SUPPORTED,
AWS_ERROR_END_COMMON_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_COMMON_PACKAGE_ID)
};
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h b/contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h
new file mode 100644
index 0000000000..3210e8ab37
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h
@@ -0,0 +1,302 @@
+/*
+Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+/*
+ * This file has been modified from its original version by Amazon:
+ * (1) Address clang-tidy errors by renaming function parameters in a number of places
+ * to match their .c counterparts.
+ * (2) Misc tweaks to unchecked writes to make security static analysis happier
+ */
+
+/* clang-format off */
+
+#ifndef AWS_COMMON_EXTERNAL_CJSON_H // NOLINT
+#define AWS_COMMON_EXTERNAL_CJSON_H // NOLINT
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32))
+#define __WINDOWS__
+#endif
+
+#ifdef __WINDOWS__
+
+/* When compiling for windows, we specify a specific calling convention to avoid issues where we are being called from a project with a different default calling convention. For windows you have 3 define options:
+
+CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever dllexport symbols
+CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport symbols (default)
+CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbol
+
+For *nix builds that support visibility attribute, you can define similar behavior by
+
+setting default visibility to hidden by adding
+-fvisibility=hidden (for gcc)
+or
+-xldscope=hidden (for sun cc)
+to CFLAGS
+
+then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJSON_EXPORT_SYMBOLS does
+
+*/
+
+#define CJSON_CDECL __cdecl
+#define CJSON_STDCALL __stdcall
+
+/* export symbols by default, this is necessary for copy pasting the C and header file */
+#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && !defined(CJSON_EXPORT_SYMBOLS)
+#define CJSON_EXPORT_SYMBOLS
+#endif
+
+#if defined(CJSON_HIDE_SYMBOLS)
+#define CJSON_PUBLIC(type) type CJSON_STDCALL
+#elif defined(CJSON_EXPORT_SYMBOLS)
+#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL
+#elif defined(CJSON_IMPORT_SYMBOLS)
+#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL
+#endif
+#else /* !__WINDOWS__ */
+#define CJSON_CDECL
+#define CJSON_STDCALL
+
+#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(CJSON_API_VISIBILITY)
+#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type
+#else
+#define CJSON_PUBLIC(type) type
+#endif
+#endif
+
+/* project version */
+#define CJSON_VERSION_MAJOR 1
+#define CJSON_VERSION_MINOR 7
+#define CJSON_VERSION_PATCH 15
+
+#include <stddef.h>
+
+/* cJSON Types: */
+#define cJSON_Invalid (0)
+#define cJSON_False (1 << 0)
+#define cJSON_True (1 << 1)
+#define cJSON_NULL (1 << 2)
+#define cJSON_Number (1 << 3)
+#define cJSON_String (1 << 4)
+#define cJSON_Array (1 << 5)
+#define cJSON_Object (1 << 6)
+#define cJSON_Raw (1 << 7) /* raw json */
+
+#define cJSON_IsReference 256
+#define cJSON_StringIsConst 512
+
+/* The cJSON structure: */
+typedef struct cJSON
+{
+ /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */
+ struct cJSON *next;
+ struct cJSON *prev;
+ /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */
+ struct cJSON *child;
+
+ /* The type of the item, as above. */
+ int type;
+
+    /* The item's string, if type==cJSON_String or type == cJSON_Raw */
+ char *valuestring;
+ /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */
+ int valueint;
+ /* The item's number, if type==cJSON_Number */
+ double valuedouble;
+
+ /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */
+ char *string;
+} cJSON;
+
+typedef struct cJSON_Hooks
+{
+ /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. */
+ void *(CJSON_CDECL *malloc_fn)(size_t sz); // NOLINT
+ void (CJSON_CDECL *free_fn)(void *ptr);
+} cJSON_Hooks;
+
+typedef int cJSON_bool;
+
+/* Limits how deeply nested arrays/objects can be before cJSON refuses to parse them.
+* This is to prevent stack overflows. */
+#ifndef CJSON_NESTING_LIMIT
+#define CJSON_NESTING_LIMIT 1000
+#endif
+
+/* returns the version of cJSON as a string */
+CJSON_PUBLIC(const char*) cJSON_Version(void);
+
+/* Supply malloc, realloc and free functions to cJSON */
+CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks);
+
+/* Memory Management: the caller is always responsible to free the results from all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is cJSON_PrintPreallocated, where the caller has full responsibility of the buffer. */
+/* Supply a block of JSON, and this returns a cJSON object you can interrogate. */
+CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value);
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length);
+/* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */
+/* If you supply a ptr in return_parse_end and parsing fails, then return_parse_end will contain a pointer to the error so will match cJSON_GetErrorPtr(). */
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated);
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated);
+
+/* Render a cJSON entity to text for transfer/storage. */
+CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item);
+/* Render a cJSON entity to text for transfer/storage without any formatting. */
+CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item);
+/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. guessing well reduces reallocation. fmt=0 gives unformatted, =1 gives formatted */
+CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt);
+/* Render a cJSON entity to text using a buffer already allocated in memory with given length. Returns 1 on success and 0 on failure. */
+/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will use, so to be safe allocate 5 bytes more than you actually need */
+CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format);
+/* Delete a cJSON entity and all subentities. */
+CJSON_PUBLIC(void) cJSON_Delete(cJSON *item);
+
+/* Returns the number of items in an array (or object). */
+CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array);
+/* Retrieve item number "index" from array "array". Returns NULL if unsuccessful. */
+CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index);
+/* Get item "string" from object. Case insensitive. */
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string);
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string);
+CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string);
+/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */
+CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void);
+
+/* Check item type and return its value */
+CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item);
+CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item);
+
+/* These functions check the type of an item */
+CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item);
+
+/* These calls create a cJSON item of the appropriate type. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean);
+CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num);
+CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string);
+/* raw json */
+CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw);
+CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void);
+
+/* Create a string where valuestring references a string so
+* it will not be freed by cJSON_Delete */
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string);
+/* Create an object/array that only references its elements so
+* they will not be freed by cJSON_Delete */
+CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child);
+CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child);
+
+/* These utilities create an Array of count items.
+* The parameter count cannot be greater than the number of elements in the number array, otherwise array access will be out of bounds.*/
+CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count);
+
+/* Append item to the specified array/object. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item);
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item);
+/* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object.
+* WARNING: When this function is used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before
+* writing to `item->string` */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item);
+/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item);
+
+/* Remove/Detach items from Arrays/Objects. */
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string);
+
+/* Update array items. */
+CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. */
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement);
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,const char *string,cJSON *newitem);
+
+/* Duplicate a cJSON item */
+CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse);
+/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will
+* need to be released. With recurse!=0, it will duplicate any children connected to the item.
+* The item->next and ->prev pointers are always zero on return from Duplicate. */
+/* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal.
+* case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */
+CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive); // NOLINT
+
+/* Minify a string: remove blank characters (such as ' ', '\t', '\r', '\n') from it.
+* The input pointer json cannot point to a read-only address area, such as a string constant,
+* but should point to a readable and writable address area. */
+CJSON_PUBLIC(void) cJSON_Minify(char *json);
+
+/* Helper functions for creating and adding items to an object at the same time.
+* They return the added item or NULL on failure. */
+CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean);
+CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number);
+CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string);
+CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw);
+CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name);
+
+/* When assigning an integer value, it needs to be propagated to valuedouble too. */
+#define cJSON_SetIntValue(object, number) ((object) ? (object)->valueint = (object)->valuedouble = (number) : (number)) //NOLINT
+/* helper for the cJSON_SetNumberValue macro */
+CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number);
+#define cJSON_SetNumberValue(object, number) ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) : (number)) //NOLINT
+/* Change the valuestring of a cJSON_String object, only takes effect when type of object is cJSON_String */
+CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring);
+
+/* Macro for iterating over an array or object */
+#define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? (array)->child : NULL; element != NULL; element = element->next) //NOLINT
+
+/* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */
+CJSON_PUBLIC(void *) cJSON_malloc(size_t size);
+CJSON_PUBLIC(void) cJSON_free(void *object);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
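
Typical use of the vendored header follows stock cJSON (note that the build renames these symbols via the cJSON symbol-renaming logic mentioned in this change, so linking details may differ). A parse-and-read sketch:

#include <aws/common/external/cJSON.h>
#include <stdio.h>

static void s_print_name(const char *json_text) {
    cJSON *root = cJSON_Parse(json_text);
    if (root == NULL) {
        fprintf(stderr, "parse error near: %s\n", cJSON_GetErrorPtr());
        return;
    }
    const cJSON *name = cJSON_GetObjectItemCaseSensitive(root, "name");
    if (cJSON_IsString(name) && (name->valuestring != NULL)) {
        printf("name: %s\n", name->valuestring);
    }
    cJSON_Delete(root); /* frees root and every child item */
}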
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/file.h b/contrib/restricted/aws/aws-c-common/include/aws/common/file.h
new file mode 100644
index 0000000000..4bbc1540db
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/file.h
@@ -0,0 +1,198 @@
+#ifndef AWS_COMMON_FILE_H
+#define AWS_COMMON_FILE_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/byte_buf.h>
+#include <aws/common/common.h>
+#include <aws/common/platform.h>
+#include <stdio.h>
+
+#ifdef AWS_OS_WINDOWS
+# define AWS_PATH_DELIM '\\'
+# define AWS_PATH_DELIM_STR "\\"
+#else
+# define AWS_PATH_DELIM '/'
+# define AWS_PATH_DELIM_STR "/"
+#endif
+
+struct aws_string;
+struct aws_directory_iterator;
+
+enum aws_file_type {
+ AWS_FILE_TYPE_FILE = 1,
+ AWS_FILE_TYPE_SYM_LINK = 2,
+ AWS_FILE_TYPE_DIRECTORY = 4,
+};
+
+struct aws_directory_entry {
+ /**
+ * Absolute path to the entry from the current process root.
+ */
+ struct aws_byte_cursor path;
+ /**
+ * Path to the entry relative to the current working directory.
+ */
+ struct aws_byte_cursor relative_path;
+ /**
+ * Bit-field of enum aws_file_type
+ */
+ int file_type;
+ /**
+ * Size of the file on disk.
+ */
+ int64_t file_size;
+};
+
+/**
+ * Invoked during calls to aws_directory_traverse() as an entry is encountered. entry will contain
+ * the parsed directory entry info.
+ *
+ * Return true to continue the traversal, or false to abort it.
+ */
+typedef bool(aws_on_directory_entry)(const struct aws_directory_entry *entry, void *user_data);
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Don't use this. It never should have been added in the first place. It's now deprecated.
+ */
+AWS_COMMON_API FILE *aws_fopen(const char *file_path, const char *mode);
+
+/**
+ * Opens file at file_path using mode. Returns the FILE pointer if successful.
+ */
+AWS_COMMON_API FILE *aws_fopen_safe(const struct aws_string *file_path, const struct aws_string *mode);
+
+/**
+ * Creates a directory if it doesn't currently exist. If the directory already exists, it's ignored and assumed
+ * successful.
+ *
+ * Returns AWS_OP_SUCCESS on success. Otherwise, check aws_last_error().
+ */
+AWS_COMMON_API int aws_directory_create(const struct aws_string *dir_path);
+/**
+ * Returns true if the directory currently exists. Otherwise, it returns false.
+ */
+AWS_COMMON_API bool aws_directory_exists(const struct aws_string *dir_path);
+/**
+ * Deletes a directory. If the directory is not empty, this will fail unless the recursive parameter is set to true.
+ * If recursive is true then the entire directory and all of its contents will be deleted. If it is set to false,
+ * the directory will be deleted only if it is empty. Returns AWS_OP_SUCCESS if the operation was successful. Otherwise,
+ * aws_last_error() will contain the error that occurred. If the directory doesn't exist, AWS_OP_SUCCESS is still
+ * returned.
+ */
+AWS_COMMON_API int aws_directory_delete(const struct aws_string *dir_path, bool recursive);
+/**
+ * Deletes a file. Returns AWS_OP_SUCCESS if the operation was successful. Otherwise,
+ * aws_last_error() will contain the error that occurred. If the file doesn't exist, AWS_OP_SUCCESS is still returned.
+ */
+AWS_COMMON_API int aws_file_delete(const struct aws_string *file_path);
+
+/**
+ * Moves directory at from to to.
+ * Returns AWS_OP_SUCCESS if the operation was successful. Otherwise,
+ * aws_last_error() will contain the error that occurred.
+ */
+AWS_COMMON_API int aws_directory_or_file_move(const struct aws_string *from, const struct aws_string *to);
+
+/**
+ * Traverse a directory starting at path.
+ *
+ * If you want the traversal to recurse the entire directory, pass recursive as true. Passing false for this parameter
+ * will only iterate the contents of the directory, but will not descend into any directories it encounters.
+ *
+ * If recursive is set to true, the traversal is performed post-order, depth-first
+ * (for practical reasons such as deleting a directory that contains subdirectories or files).
+ *
+ * returns AWS_OP_SUCCESS(0) on success.
+ */
+AWS_COMMON_API int aws_directory_traverse(
+ struct aws_allocator *allocator,
+ const struct aws_string *path,
+ bool recursive,
+ aws_on_directory_entry *on_entry,
+ void *user_data);
+
+/**
+ * Creates a read-only iterator of a directory starting at path. If path is invalid or there's any other error
+ * condition, NULL will be returned. Call aws_last_error() for the exact error in that case.
+ */
+AWS_COMMON_API struct aws_directory_iterator *aws_directory_entry_iterator_new(
+ struct aws_allocator *allocator,
+ const struct aws_string *path);
+
+/**
+ * Moves the iterator to the next entry. Returns AWS_OP_SUCCESS if another entry is available, or AWS_OP_ERR with
+ * AWS_ERROR_LIST_EMPTY as the value for aws_last_error() if no more entries are available.
+ */
+AWS_COMMON_API int aws_directory_entry_iterator_next(struct aws_directory_iterator *iterator);
+
+/**
+ * Moves the iterator to the previous entry. Returns AWS_OP_SUCCESS if another entry is available, or AWS_OP_ERR with
+ * AWS_ERROR_LIST_EMPTY as the value for aws_last_error() if no more entries are available.
+ */
+AWS_COMMON_API int aws_directory_entry_iterator_previous(struct aws_directory_iterator *iterator);
+
+/**
+ * Cleanup and deallocate iterator
+ */
+AWS_COMMON_API void aws_directory_entry_iterator_destroy(struct aws_directory_iterator *iterator);
+
+/**
+ * Gets the aws_directory_entry value for iterator at the current position. Returns NULL if the iterator contains no
+ * entries.
+ */
+AWS_COMMON_API const struct aws_directory_entry *aws_directory_entry_iterator_get_value(
+ const struct aws_directory_iterator *iterator);
+
+/**
+ * Returns true iff the character is a directory separator on ANY supported platform.
+ */
+AWS_COMMON_API
+bool aws_is_any_directory_separator(char value);
+
+/**
+ * Returns the directory separator used by the local platform
+ */
+AWS_COMMON_API
+char aws_get_platform_directory_separator(void);
+
+/**
+ * Returns the current user's home directory.
+ */
+AWS_COMMON_API
+struct aws_string *aws_get_home_directory(struct aws_allocator *allocator);
+
+/**
+ * Returns true if a file or path exists, otherwise, false.
+ */
+AWS_COMMON_API
+bool aws_path_exists(const struct aws_string *path);
+
+/*
+ * Wrapper for highest-resolution platform-dependent seek implementation.
+ * Maps to:
+ *
+ * _fseeki64() on Windows
+ * fseeko() on Linux
+ *
+ * whence can either be SEEK_SET or SEEK_END
+ */
+AWS_COMMON_API
+int aws_fseek(FILE *file, int64_t offset, int whence);
+
+/*
+ * Wrapper for os-specific file length query. We can't use fseek(END, 0)
+ * because support for it is not technically required.
+ *
+ * Unix flavors call fstat, while Windows variants use GetFileSize on a
+ * HANDLE queried from the libc FILE pointer.
+ */
+AWS_COMMON_API
+int aws_file_get_length(FILE *file, int64_t *length);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_COMMON_FILE_H */
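
A sketch of the traversal API above (the callback and wrapper names are illustrative):

#include <aws/common/file.h>
#include <stdio.h>

static bool s_on_entry(const struct aws_directory_entry *entry, void *user_data) {
    (void)user_data;
    printf("%.*s (%lld bytes)\n",
        (int)entry->relative_path.len,
        (const char *)entry->relative_path.ptr,
        (long long)entry->file_size);
    return true; /* keep going; return false to abort the traversal */
}

static int s_list_dir(struct aws_allocator *allocator, const struct aws_string *path) {
    /* recursive traversal is post-order, depth-first, per the header docs */
    return aws_directory_traverse(allocator, path, true /* recursive */, s_on_entry, NULL);
}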
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h b/contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h
index c4ac55cb64..8135a15495 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/hash_table.h
@@ -12,6 +12,7 @@
#define AWS_COMMON_HASH_TABLE_ITER_CONTINUE (1 << 0)
#define AWS_COMMON_HASH_TABLE_ITER_DELETE (1 << 1)
+#define AWS_COMMON_HASH_TABLE_ITER_ERROR (1 << 2)
/**
* Hash table data structure. This module provides an automatically resizing
@@ -321,6 +322,10 @@ int aws_hash_table_remove_element(struct aws_hash_table *map, struct aws_hash_el
* element (if not set, iteration stops)
* # AWS_COMMON_HASH_TABLE_ITER_DELETE - Deletes the current value and
* continues iteration. destroy_fn will NOT be invoked.
+ * # AWS_COMMON_HASH_TABLE_ITER_ERROR - Stops iteration with an error.
+ * No action is taken for the current value, and there is no rollback:
+ * values deleted earlier in the iteration stay deleted.
+ * aws_hash_table_foreach returns AWS_OP_ERR after stopping the iteration.
*
* Invoking any method which may change the contents of the hashtable
* during iteration results in undefined behavior. However, you may safely
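
A sketch of a foreach callback using the new error flag (assuming the usual aws_hash_table_foreach(map, callback, context) signature from this header):

#include <aws/common/hash_table.h>

static int s_validate_element(void *context, struct aws_hash_element *element) {
    (void)context;
    if (element->value == NULL) {
        /* stop here; aws_hash_table_foreach() then returns AWS_OP_ERR */
        return AWS_COMMON_HASH_TABLE_ITER_ERROR;
    }
    return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
}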
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/json.h b/contrib/restricted/aws/aws-c-common/include/aws/common/json.h
new file mode 100644
index 0000000000..5182bbf132
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/json.h
@@ -0,0 +1,348 @@
+#ifndef AWS_COMMON_JSON_H
+#define AWS_COMMON_JSON_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/common.h>
+
+struct aws_json_value;
+
+// ====================
+// Create and pass type
+
+/**
+ * Creates a new string aws_json_value with the given string and returns a pointer to it.
+ *
+ * Note: You will need to free the memory for the aws_json_value using aws_json_value_destroy on the aws_json_value or
+ * on the object/array containing the aws_json_value.
+ * @param string A byte pointer to the string you want to store in the aws_json_value
+ * @param allocator The allocator to use when creating the value
+ * @return A new string aws_json_value
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_string(struct aws_allocator *allocator, struct aws_byte_cursor string);
+
+/**
+ * Creates a new number aws_json_value with the given number and returns a pointer to it.
+ *
+ * Note: You will need to free the memory for the aws_json_value using aws_json_value_destroy on the aws_json_value or
+ * on the object/array containing the aws_json_value.
+ * @param number The number you want to store in the aws_json_value
+ * @param allocator The allocator to use when creating the value
+ * @return A new number aws_json_value
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_number(struct aws_allocator *allocator, double number);
+
+/**
+ * Creates a new array aws_json_value and returns a pointer to it.
+ *
+ * Note: You will need to free the memory for the aws_json_value using aws_json_value_destroy on the aws_json_value or
+ * on the object/array containing the aws_json_value.
+ * Deleting this array will also destroy any aws_json_values it contains.
+ * @param allocator The allocator to use when creating the value
+ * @return A new array aws_json_value
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_array(struct aws_allocator *allocator);
+
+/**
+ * Creates a new boolean aws_json_value with the given boolean and returns a pointer to it.
+ *
+ * Note: You will need to free the memory for the aws_json_value using aws_json_value_destroy on the aws_json_value or
+ * on the object/array containing the aws_json_value.
+ * @param boolean The boolean you want to store in the aws_json_value
+ * @param allocator The allocator to use when creating the value
+ * @return A new boolean aws_json_value
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_boolean(struct aws_allocator *allocator, bool boolean);
+
+/**
+ * Creates a new null aws_json_value and returns a pointer to it.
+ *
+ * Note: You will need to free the memory for the aws_json_value using aws_json_value_destroy on the aws_json_value or
+ * on the object/array containing the aws_json_value.
+ * @param allocator The allocator to use when creating the value
+ * @return A new null aws_json_value
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_null(struct aws_allocator *allocator);
+
+/**
+ * Creates a new object aws_json_value and returns a pointer to it.
+ *
+ * Note: You will need to free the memory for the aws_json_value using aws_json_value_destroy on the aws_json_value or
+ * on the object/array containing the aws_json_value.
+ * Deleting this object will also destroy any aws_json_values it contains.
+ * @param allocator The allocator to use when creating the value
+ * @return A new object aws_json_value
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_object(struct aws_allocator *allocator);
+// ====================
+
+// ====================
+// Value getters
+
+/**
+ * Gets the string of a string aws_json_value.
+ * @param value The string aws_json_value.
+ * @param output The string
+ * @return AWS_OP_SUCCESS if the value is a string, otherwise AWS_OP_ERR.
+ */
+AWS_COMMON_API
+int aws_json_value_get_string(const struct aws_json_value *value, struct aws_byte_cursor *output);
+
+/**
+ * Gets the number of a number aws_json_value.
+ * @param value The number aws_json_value.
+ * @param output The number
+ * @return AWS_OP_SUCCESS if the value is a number, otherwise AWS_OP_ERR.
+ */
+AWS_COMMON_API
+int aws_json_value_get_number(const struct aws_json_value *value, double *output);
+
+/**
+ * Gets the boolean of a boolean aws_json_value.
+ * @param value The boolean aws_json_value.
+ * @param output The boolean
+ * @return AWS_OP_SUCCESS if the value is a boolean, otherwise AWS_OP_ERR.
+ */
+AWS_COMMON_API
+int aws_json_value_get_boolean(const struct aws_json_value *value, bool *output);
+// ====================
+
+// ====================
+// Object API
+
+/**
+ * Adds an aws_json_value to an object aws_json_value.
+ *
+ * Note that the aws_json_value will be destroyed when the aws_json_value object is destroyed
+ * by calling "aws_json_value_destroy()"
+ * @param object The object aws_json_value you want to add a value to.
+ * @param key The key to add the aws_json_value at.
+ * @param value The aws_json_value you want to add.
+ * @return AWS_OP_SUCCESS if adding was successful.
+ * Will return AWS_OP_ERR if the object passed is invalid or if the passed key
+ * is already in use in the object.
+ */
+AWS_COMMON_API
+int aws_json_value_add_to_object(
+ struct aws_json_value *object,
+ struct aws_byte_cursor key,
+ struct aws_json_value *value);
+
+/**
+ * Returns the aws_json_value at the given key.
+ * @param object The object aws_json_value you want to get the value from.
+ * @param key The key that the aws_json_value is at. Is case sensitive.
+ * @return The aws_json_value at the given key, otherwise NULL.
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_get_from_object(const struct aws_json_value *object, struct aws_byte_cursor key);
+
+/**
+ * Checks if there is an aws_json_value at the given key.
+ * @param object The object aws_json_value you want to check a key in.
+ * @param key The key that you want to check. Is case sensitive.
+ * @return True if an aws_json_value is found.
+ */
+AWS_COMMON_API
+bool aws_json_value_has_key(const struct aws_json_value *object, struct aws_byte_cursor key);
+
+/**
+ * Removes the aws_json_value at the given key.
+ * @param object The object aws_json_value you want to remove an aws_json_value from.
+ * @param key The key that the aws_json_value is at. Is case sensitive.
+ * @return AWS_OP_SUCCESS if the aws_json_value was removed.
+ * Will return AWS_OP_ERR if the object passed is invalid or if the value
+ * at the key cannot be found.
+ */
+AWS_COMMON_API
+int aws_json_value_remove_from_object(struct aws_json_value *object, struct aws_byte_cursor key);
+// ====================
+
+// ====================
+// Array API
+
+/**
+ * Adds an aws_json_value to the given array aws_json_value.
+ *
+ * Note that the aws_json_value will be destroyed when the aws_json_value array is destroyed
+ * by calling "aws_json_value_destroy()"
+ * @param array The array aws_json_value you want to add an aws_json_value to.
+ * @param value The aws_json_value you want to add.
+ * @return AWS_OP_SUCCESS if adding the aws_json_value was successful.
+ * Will return AWS_OP_ERR if the array passed is invalid.
+ */
+AWS_COMMON_API
+int aws_json_value_add_array_element(struct aws_json_value *array, const struct aws_json_value *value);
+
+/**
+ * Returns the aws_json_value at the given index in the array aws_json_value.
+ * @param array The array aws_json_value.
+ * @param index The index of the aws_json_value you want to access.
+ * @return A pointer to the aws_json_value at the given index in the array, otherwise NULL.
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_get_array_element(const struct aws_json_value *array, size_t index);
+
+/**
+ * Returns the number of items in the array aws_json_value.
+ * @param array The array aws_json_value.
+ * @return The number of items in the array aws_json_value.
+ */
+AWS_COMMON_API
+size_t aws_json_get_array_size(const struct aws_json_value *array);
+
+/**
+ * Removes the aws_json_value at the given index in the array aws_json_value.
+ * @param array The array aws_json_value.
+ * @param index The index containing the aws_json_value you want to remove.
+ * @return AWS_OP_SUCCESS if the aws_json_value at the index was removed.
+ * Will return AWS_OP_ERR if the array passed is invalid or if the index
+ * passed is out of range.
+ */
+AWS_COMMON_API
+int aws_json_value_remove_array_element(struct aws_json_value *array, size_t index);
+// ====================
+
+// ====================
+// Checks
+
+/**
+ * Checks if the aws_json_value is a string.
+ * @param value The aws_json_value to check.
+ * @return True if the aws_json_value is a string aws_json_value, otherwise false.
+ */
+AWS_COMMON_API
+bool aws_json_value_is_string(const struct aws_json_value *value);
+
+/**
+ * Checks if the aws_json_value is a number.
+ * @param value The aws_json_value to check.
+ * @return True if the aws_json_value is a number aws_json_value, otherwise false.
+ */
+AWS_COMMON_API
+bool aws_json_value_is_number(const struct aws_json_value *value);
+
+/**
+ * Checks if the aws_json_value is an array.
+ * @param value The aws_json_value to check.
+ * @return True if the aws_json_value is an array aws_json_value, otherwise false.
+ */
+AWS_COMMON_API
+bool aws_json_value_is_array(const struct aws_json_value *value);
+
+/**
+ * Checks if the aws_json_value is a boolean.
+ * @param value The aws_json_value to check.
+ * @return True if the aws_json_value is a boolean aws_json_value, otherwise false.
+ */
+AWS_COMMON_API
+bool aws_json_value_is_boolean(const struct aws_json_value *value);
+
+/**
+ * Checks if the aws_json_value is a null aws_json_value.
+ * @param value The aws_json_value to check.
+ * @return True if the aws_json_value is a null aws_json_value, otherwise false.
+ */
+AWS_COMMON_API
+bool aws_json_value_is_null(const struct aws_json_value *value);
+
+/**
+ * Checks if the aws_json_value is an object aws_json_value.
+ * @param value The aws_json_value to check.
+ * @return True if the aws_json_value is an object aws_json_value, otherwise false.
+ */
+AWS_COMMON_API
+bool aws_json_value_is_object(const struct aws_json_value *value);
+// ====================
+
+// ====================
+// Memory Management
+
+/**
+ * Initializes the JSON module for use.
+ * @param allocator The allocator to use for creating aws_json_value structs.
+ */
+AWS_COMMON_API
+void aws_json_module_init(struct aws_allocator *allocator);
+
+/**
+ * Cleans up the JSON module. Should be called when finished using the module.
+ */
+AWS_COMMON_API
+void aws_json_module_cleanup(void);
+
+/**
+ * Removes the aws_json_value from memory. If the aws_json_value is an object or array, it will also destroy
+ * attached aws_json_values as well.
+ *
+ * For example, if you called "aws_json_value_add_array_element(b, a)" to add an object "a" to an array "b", calling
+ * "aws_json_value_destroy(b)" will also free "a" automatically. All children/attached aws_json_values are freed
+ * when the parent/root aws_json_value is destroyed.
+ * @param value The aws_json_value to destroy.
+ */
+AWS_COMMON_API
+void aws_json_value_destroy(struct aws_json_value *value);
+// ====================
+
+// ====================
+// Utility
+
+/**
+ * Appends an unformatted JSON string representation of the aws_json_value into the passed byte buffer.
+ * The byte buffer is expected to be already initialized so the function can append the JSON into it.
+ *
+ * Note: The byte buffer will automatically have its size extended if the JSON string is over the byte
+ * buffer capacity AND the byte buffer has an allocator associated with it. If the byte buffer does not
+ * have an allocator associated and the JSON string is over capacity, AWS_OP_ERR will be returned.
+ *
+ * Note: When you are finished with the aws_byte_buf, you must call "aws_byte_buf_clean_up_secure" to free
+ * the memory used, as it will NOT be called automatically.
+ * @param value The aws_json_value to format.
+ * @param output The destination for the JSON string
+ * @return AWS_OP_SUCCESS if the JSON string was appended to output without any errors
+ * Will return AWS_OP_ERR if the value passed is not an aws_json_value or if there
+ * was an error appending the JSON into the byte buffer.
+ */
+AWS_COMMON_API
+int aws_byte_buf_append_json_string(const struct aws_json_value *value, struct aws_byte_buf *output);
+
+/**
+ * Appends a formatted JSON string representation of the aws_json_value into the passed byte buffer.
+ * The byte buffer is expected to already be initialized so the function can append the JSON into it.
+ *
+ * Note: The byte buffer will automatically have its size extended if the JSON string is over the byte
+ * buffer capacity AND the byte buffer has an allocator associated with it. If the byte buffer does not
+ * have an allocator associated and the JSON string is over capacity, AWS_OP_ERR will be returned.
+ *
+ * Note: When you are finished with the aws_byte_buf, you must call "aws_byte_buf_clean_up_secure" to free
+ * the memory used, as it will NOT be called automatically.
+ * @param value The aws_json_value to format.
+ * @param output The destination for the JSON string
+ * @return AWS_OP_SUCCESS if the JSON string was appended to output without any errors.
+ * Will return AWS_ERROR_INVALID_ARGUMENT if the value passed is not an aws_json_value or if there
+ * was an error appending the JSON into the byte buffer.
+ */
+AWS_COMMON_API
+int aws_byte_buf_append_json_string_formatted(const struct aws_json_value *value, struct aws_byte_buf *output);
+
+/**
+ * Parses the JSON string and returns an aws_json_value containing the root of the JSON.
+ * @param allocator The allocator used to create the value
+ * @param string The string containing the JSON.
+ * @return The root aws_json_value of the JSON.
+ */
+AWS_COMMON_API
+struct aws_json_value *aws_json_value_new_from_string(struct aws_allocator *allocator, struct aws_byte_cursor string);
+// ====================
+
+#endif // AWS_COMMON_JSON_H
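
A build-and-serialize sketch for the API above (assumes aws_json_module_init() has already been called; error handling elided for brevity):

#include <aws/common/json.h>

static int s_write_doc(struct aws_allocator *allocator, struct aws_byte_buf *out) {
    struct aws_json_value *obj = aws_json_value_new_object(allocator);
    aws_json_value_add_to_object(
        obj, aws_byte_cursor_from_c_str("id"), aws_json_value_new_number(allocator, 42));

    /* out has an allocator, so it grows as needed while appending */
    aws_byte_buf_init(out, allocator, 64);
    int result = aws_byte_buf_append_json_string(obj, out);

    aws_json_value_destroy(obj); /* also destroys the attached number value */
    return result; /* caller later calls aws_byte_buf_clean_up_secure(out) */
}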
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h b/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h
index 5015d673e0..1b34e3bae3 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h
@@ -6,6 +6,7 @@
* SPDX-License-Identifier: Apache-2.0.
*/
+#include <aws/common/atomics.h>
#include <aws/common/common.h>
#include <aws/common/thread.h>
@@ -80,6 +81,9 @@ enum aws_common_log_subject {
AWS_LS_COMMON_THREAD,
AWS_LS_COMMON_MEMTRACE,
AWS_LS_COMMON_XML_PARSER,
+ AWS_LS_COMMON_IO,
+ AWS_LS_COMMON_BUS,
+ AWS_LS_COMMON_TEST,
AWS_LS_COMMON_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_COMMON_PACKAGE_ID)
};
@@ -109,6 +113,7 @@ struct aws_logger_vtable {
;
enum aws_log_level (*const get_log_level)(struct aws_logger *logger, aws_log_subject_t subject);
void (*const clean_up)(struct aws_logger *logger);
+ int (*set_log_level)(struct aws_logger *logger, enum aws_log_level);
};
struct aws_logger {
@@ -189,7 +194,7 @@ struct aws_logger_pipeline {
struct aws_log_channel *channel;
struct aws_log_writer *writer;
struct aws_allocator *allocator;
- enum aws_log_level level;
+ struct aws_atomic_var level;
};
/**
@@ -224,6 +229,15 @@ AWS_COMMON_API
void aws_logger_clean_up(struct aws_logger *logger);
/**
+ * Sets the current logging level for the logger. Loggers are not required to support this.
+ * @param logger logger to set the log level for
+ * @param level new log level for the logger
+ * @return AWS_OP_SUCCESS if the level was successfully set, AWS_OP_ERR otherwise
+ */
+AWS_COMMON_API
+int aws_logger_set_log_level(struct aws_logger *logger, enum aws_log_level level);
+
+/**
* Converts a log level to a c-string constant. Intended primarily to support building log lines that
* include the level in them, i.e.
*
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h b/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h
index 4bd7e028d1..48f90ad501 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h
@@ -75,6 +75,16 @@ AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1, 2, 3) == 3);
# endif /* defined(__GNUC__) || defined(__clang__) */
#endif /* defined(_MSC_VER) */
+#if defined(__has_feature)
+# if __has_feature(address_sanitizer)
+# define AWS_SUPPRESS_ASAN __attribute__((no_sanitize("address")))
+# endif
+#endif
+
+#if !defined(AWS_SUPPRESS_ASAN)
+# define AWS_SUPPRESS_ASAN
+#endif
+
/* If this is C++, restrict isn't supported. If this is not at least C99 on gcc and clang, it isn't supported.
* If visual C++ building in C mode, the restrict definition is __restrict.
* This just figures all of that out based on who's including this header file. */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h b/contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h
index 73c2ecfa55..edb91864a8 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/mutex.h
@@ -57,6 +57,7 @@ int aws_mutex_lock(struct aws_mutex *mutex);
* Attempts to acquire the lock but returns immediately if it can not.
* While on some platforms such as Windows, this may behave as a reentrant mutex,
 * you should not treat it like one. On platforms where it is possible for it to be non-reentrant, it will be.
+ * Note: For Windows, the minimum supported server version is Windows Server 2008 R2 [desktop apps | UWP apps]
*/
AWS_COMMON_API
int aws_mutex_try_lock(struct aws_mutex *mutex);
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/platform.h b/contrib/restricted/aws/aws-c-common/include/aws/common/platform.h
index c8be19c7d6..b1e16a1e7c 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/platform.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/platform.h
@@ -10,11 +10,20 @@
#ifdef _WIN32
# define AWS_OS_WINDOWS
+/* indicate whether this is for Windows desktop, or UWP or Windows S, or other Windows-like devices */
+# if defined(AWS_HAVE_WINAPI_DESKTOP)
+# define AWS_OS_WINDOWS_DESKTOP
+# endif
+
#elif __APPLE__
# define AWS_OS_APPLE
# include "TargetConditionals.h"
# if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
# define AWS_OS_IOS
+# elif defined(TARGET_OS_WATCH) && TARGET_OS_WATCH
+# define AWS_OS_WATCHOS
+# elif defined(TARGET_OS_TV) && TARGET_OS_TV
+# define AWS_OS_TVOS
# else
# define AWS_OS_MACOS
# endif
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h b/contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h
index 8859729346..a4df8c5061 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/priority_queue.h
@@ -41,7 +41,7 @@ struct aws_priority_queue {
};
struct aws_priority_queue_node {
- /** The current index of the node in queuesion, or SIZE_MAX if the node has been removed. */
+ /** The current index of the node in question, or SIZE_MAX if the node has been removed. */
size_t current_index;
};
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/private/dlloads.h b/contrib/restricted/aws/aws-c-common/include/aws/common/private/dlloads.h
index 943f6cb98d..c9a90897c8 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/private/dlloads.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/private/dlloads.h
@@ -10,7 +10,13 @@
*/
#define AWS_MPOL_PREFERRED_ALIAS 1
+struct bitmask;
+
extern long (*g_set_mempolicy_ptr)(int, const unsigned long *, unsigned long);
+extern int (*g_numa_available_ptr)(void);
+extern int (*g_numa_num_configured_nodes_ptr)(void);
+extern int (*g_numa_num_possible_cpus_ptr)(void);
+extern int (*g_numa_node_of_cpu_ptr)(int cpu);
extern void *g_libnuma_handle;
#endif /* AWS_COMMON_PRIVATE_DLLOADS_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl
index b18a3cc97f..50b269fc7b 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl
@@ -1,5 +1,8 @@
#ifndef AWS_COMMON_PRIVATE_LOOKUP3_INL
#define AWS_COMMON_PRIVATE_LOOKUP3_INL
+
+#include <aws/common/macros.h>
+
/* clang-format off */
/*
@@ -498,6 +501,7 @@ static void hashlittle2(
size_t length, /* length of the key */
uint32_t *pc, /* IN: primary initval, OUT: primary hash */
uint32_t *pb) /* IN: secondary initval, OUT: secondary hash */
+ AWS_SUPPRESS_ASAN /* AddressSanitizer hates this implementation, even though it's innocuous */
{
uint32_t a,b,c; /* internal state */
union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/private/thread_shared.h b/contrib/restricted/aws/aws-c-common/include/aws/common/private/thread_shared.h
new file mode 100644
index 0000000000..ca263e56c9
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/private/thread_shared.h
@@ -0,0 +1,39 @@
+#ifndef AWS_COMMON_PRIVATE_THREAD_SHARED_H
+#define AWS_COMMON_PRIVATE_THREAD_SHARED_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/thread.h>
+
+struct aws_linked_list;
+struct aws_linked_list_node;
+
+/**
+ * Iterates a list of thread wrappers, joining against each corresponding thread, and freeing the wrapper once
+ * the join has completed. Do not hold the managed thread lock when invoking this function, instead swap the
+ * pending join list into a local and call this on the local.
+ *
+ * @param wrapper_list list of thread wrappers to join and free
+ */
+AWS_COMMON_API void aws_thread_join_and_free_wrapper_list(struct aws_linked_list *wrapper_list);
+
+/**
+ * Adds a thread (wrapper embedding a linked list node) to the global list of threads that have run to completion
+ * and need a join in order to know that the OS has truly finished with the thread.
+ * @param node linked list node embedded in the thread wrapper
+ */
+AWS_COMMON_API void aws_thread_pending_join_add(struct aws_linked_list_node *node);
+
+/**
+ * Initializes the managed thread system. Called during library init.
+ */
+AWS_COMMON_API void aws_thread_initialize_thread_management(void);
+
+/**
+ * Gets the current managed thread count
+ */
+AWS_COMMON_API size_t aws_thread_get_managed_thread_count(void);
+
+#endif /* AWS_COMMON_PRIVATE_THREAD_SHARED_H */
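
A sketch of the swap-then-join pattern the comment above describes (the lock and pending list here are hypothetical stand-ins for the managed-thread state, which lives in the implementation file):

#include <aws/common/private/thread_shared.h>
#include <aws/common/linked_list.h>
#include <aws/common/mutex.h>

static struct aws_mutex s_managed_lock = AWS_MUTEX_INIT;
static struct aws_linked_list s_pending_joins; /* assume initialized elsewhere */

static void s_reap_finished_threads(void) {
    struct aws_linked_list local;
    aws_linked_list_init(&local);

    /* swap under the lock, then join outside it, per the header's guidance */
    aws_mutex_lock(&s_managed_lock);
    aws_linked_list_swap_contents(&s_pending_joins, &local);
    aws_mutex_unlock(&s_managed_lock);

    aws_thread_join_and_free_wrapper_list(&local);
}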
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/promise.h b/contrib/restricted/aws/aws-c-common/include/aws/common/promise.h
new file mode 100644
index 0000000000..e19d858c72
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/promise.h
@@ -0,0 +1,95 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_COMMON_PROMISE_H
+#define AWS_COMMON_PROMISE_H
+
+#include <aws/common/common.h>
+
+/*
+ * Standard promise interface. Promise can be waited on by multiple threads, and as long as it is
+ * ref-counted correctly, will provide the resultant value/error code to all waiters.
+ * All promise API calls are internally thread-safe.
+ */
+struct aws_promise;
+/*
+ * Creates a new promise
+ */
+AWS_COMMON_API
+struct aws_promise *aws_promise_new(struct aws_allocator *allocator);
+
+/*
+ * Indicate a new reference to a promise. At minimum, each new thread making use of the promise should
+ * acquire it.
+ */
+AWS_COMMON_API
+struct aws_promise *aws_promise_acquire(struct aws_promise *promise);
+
+/*
+ * Releases a reference on the promise. When the refcount hits 0, the promise is cleaned up and freed.
+ */
+AWS_COMMON_API
+void aws_promise_release(struct aws_promise *promise);
+
+/*
+ * Waits infinitely for the promise to be completed
+ */
+AWS_COMMON_API
+void aws_promise_wait(struct aws_promise *promise);
+/*
+ * Waits for the requested time in nanoseconds. Returns true if the promise was completed.
+ */
+AWS_COMMON_API
+bool aws_promise_wait_for(struct aws_promise *promise, size_t nanoseconds);
+
+/*
+ * Completes the promise and stores the result along with an optional destructor. If the value
+ * is not taken via `aws_promise_take_value`, it will be destroyed when the promise's reference
+ * count reaches zero.
+ * NOTE: Promise cannot be completed twice
+ */
+AWS_COMMON_API
+void aws_promise_complete(struct aws_promise *promise, void *value, void (*dtor)(void *));
+
+/*
+ * Completes the promise and stores the error code
+ * NOTE: Promise cannot be completed twice
+ */
+AWS_COMMON_API
+void aws_promise_fail(struct aws_promise *promise, int error_code);
+
+/*
+ * Returns whether or not the promise has completed (regardless of success or failure)
+ */
+AWS_COMMON_API
+bool aws_promise_is_complete(struct aws_promise *promise);
+
+/*
+ * Returns the error code recorded if the promise failed, or 0 if it succeeded
+ * NOTE: It is fatal to attempt to retrieve the error code before the promise is completed
+ */
+AWS_COMMON_API
+int aws_promise_error_code(struct aws_promise *promise);
+
+/*
+ * Returns the value provided to the promise if it succeeded, or NULL if none was provided
+ * or the promise failed. Check `aws_promise_error_code` to be sure.
+ * NOTE: The ownership of the value is retained by the promise.
+ * NOTE: It is fatal to attempt to retrieve the value before the promise is completed
+ */
+AWS_COMMON_API
+void *aws_promise_value(struct aws_promise *promise);
+
+/*
+ * Returns the value provided to the promise if it succeeded, or NULL if none was provided
+ * or the promise failed. Check `aws_promise_error_code` to be sure.
+ * NOTE: The promise relinquishes ownership of the value, the caller is now responsible for
+ * freeing any resources associated with the value
+ * NOTE: It is fatal to attempt to take the value before the promise is completed
+ */
+AWS_COMMON_API
+void *aws_promise_take_value(struct aws_promise *promise);
+
+#endif // AWS_COMMON_PROMISE_H
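
A two-thread sketch of the promise API above (function names are illustrative; each thread holds its own reference, per the acquire/release docs):

#include <aws/common/promise.h>
#include <stddef.h>

/* producer side: publish a result and drop this thread's reference */
static void s_producer_finish(struct aws_promise *promise, void *result) {
    aws_promise_complete(promise, result, NULL /* no dtor needed for this result */);
    aws_promise_release(promise);
}

/* consumer side: block until completion, then take ownership of the value */
static void *s_consumer_await(struct aws_promise *promise) {
    aws_promise_wait(promise);
    void *value = NULL;
    if (aws_promise_error_code(promise) == 0) {
        value = aws_promise_take_value(promise); /* caller now owns the value */
    }
    aws_promise_release(promise);
    return value;
}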
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/ref_count.h b/contrib/restricted/aws/aws-c-common/include/aws/common/ref_count.h
index 71b33b892f..596ec2c497 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/ref_count.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/ref_count.h
@@ -54,46 +54,6 @@ AWS_COMMON_API void *aws_ref_count_acquire(struct aws_ref_count *ref_count);
*/
AWS_COMMON_API size_t aws_ref_count_release(struct aws_ref_count *ref_count);
-/**
- * Utility function that returns when all auxiliary threads created by crt types (event loop groups and
- * host resolvers) have completed and those types have completely cleaned themselves up. The actual cleanup
- * process may be invoked as a part of a spawned thread, but the wait will not get signalled until that cleanup
- * thread is in its at_exit callback processing loop with no outstanding memory allocations.
- *
- * Primarily used by tests to guarantee that everything is cleaned up before performing a memory check.
- */
-AWS_COMMON_API void aws_global_thread_creator_shutdown_wait(void);
-
-/**
- * Utility function that returns when all auxiliary threads created by crt types (event loop groups and
- * host resolvers) have completed and those types have completely cleaned themselves up. The actual cleanup
- * process may be invoked as a part of a spawned thread, but the wait will not get signalled until that cleanup
- * thread is in its at_exit callback processing loop with no outstanding memory allocations.
- *
- * Primarily used by tests to guarantee that everything is cleaned up before performing a memory check.
- *
- * Returns AWS_OP_SUCCESS if the conditional wait terminated properly, AWS_OP_ERR otherwise (timeout, etc..)
- */
-AWS_COMMON_API int aws_global_thread_creator_shutdown_wait_for(uint32_t wait_timeout_in_seconds);
-
-/**
- * Increments the global thread creator count. Currently invoked on event loop group and host resolver creation.
- *
- * Tracks the number of outstanding thread-creating objects (not the total number of threads generated).
- * Currently this is the number of aws_host_resolver and aws_event_loop_group objects that have not yet been
- * fully cleaned up.
- */
-AWS_COMMON_API void aws_global_thread_creator_increment(void);
-
-/**
- * Decrements the global thread creator count. Currently invoked on event loop group and host resolver destruction.
- *
- * Tracks the number of outstanding thread-creating objects (not the total number of threads generated).
- * Currently this is the number of aws_host_resolver and aws_event_loop_group objects that have not yet been
- * fully cleaned up.
- */
-AWS_COMMON_API void aws_global_thread_creator_decrement(void);
-
AWS_EXTERN_C_END
#endif /* AWS_COMMON_REF_COUNT_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/resource_name.h b/contrib/restricted/aws/aws-c-common/include/aws/common/resource_name.h
deleted file mode 100644
index 9d636e23ff..0000000000
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/resource_name.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-#ifndef AWS_COMMON_RESOURCE_NAME_H
-#define AWS_COMMON_RESOURCE_NAME_H
-#pragma once
-
-#include <aws/common/byte_buf.h>
-#include <aws/common/common.h>
-
-struct aws_resource_name {
- struct aws_byte_cursor partition;
- struct aws_byte_cursor service;
- struct aws_byte_cursor region;
- struct aws_byte_cursor account_id;
- struct aws_byte_cursor resource_id;
-};
-
-AWS_EXTERN_C_BEGIN
-
-/**
- Given an ARN "Amazon Resource Name" represented as an in memory a
- structure representing the parts
-*/
-AWS_COMMON_API
-int aws_resource_name_init_from_cur(struct aws_resource_name *arn, const struct aws_byte_cursor *input);
-
-/**
- Calculates the space needed to write an ARN to a byte buf
-*/
-AWS_COMMON_API
-int aws_resource_name_length(const struct aws_resource_name *arn, size_t *size);
-
-/**
- Serializes an ARN structure into the lexical string format
-*/
-AWS_COMMON_API
-int aws_byte_buf_append_resource_name(struct aws_byte_buf *buf, const struct aws_resource_name *arn);
-
-AWS_EXTERN_C_END
-
-#endif /* AWS_COMMON_RESOURCE_NAME_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.h b/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.h
index d3e7b6da5a..9f9a1499e3 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.h
@@ -92,12 +92,6 @@ AWS_COMMON_API bool aws_ring_buffer_buf_belongs_to_pool(
const struct aws_byte_buf *buf);
/**
- * Initializes the supplied allocator to be based on the provided ring buffer. Allocations must be allocated
- * and freed in the same order, or the ring buffer will assert.
- */
-AWS_COMMON_API int aws_ring_buffer_allocator_init(struct aws_allocator *allocator, struct aws_ring_buffer *ring_buffer);
-
-/**
* Cleans up a ring buffer allocator instance. Does not clean up the ring buffer.
*/
AWS_COMMON_API void aws_ring_buffer_allocator_clean_up(struct aws_allocator *allocator);
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.inl
index 7ce79a68d9..34e76a4a13 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/ring_buffer.inl
@@ -14,7 +14,7 @@ AWS_EXTERN_C_BEGIN
AWS_STATIC_IMPL bool aws_ring_buffer_check_atomic_ptr(
const struct aws_ring_buffer *ring_buf,
const uint8_t *atomic_ptr) {
- return (atomic_ptr >= ring_buf->allocation && atomic_ptr <= ring_buf->allocation_end);
+ return ((atomic_ptr != NULL) && (atomic_ptr >= ring_buf->allocation && atomic_ptr <= ring_buf->allocation_end));
}
/**
@@ -37,8 +37,8 @@ AWS_STATIC_IMPL bool aws_ring_buffer_is_valid(const struct aws_ring_buffer *ring
bool tail_in_range = aws_ring_buffer_check_atomic_ptr(ring_buf, tail);
/* if head points-to the first element of the buffer then tail must too */
bool valid_head_tail = (head != ring_buf->allocation) || (tail == ring_buf->allocation);
- return ring_buf && AWS_MEM_IS_READABLE(ring_buf->allocation, ring_buf->allocation_end - ring_buf->allocation) &&
- head_in_range && tail_in_range && valid_head_tail && (ring_buf->allocator != NULL);
+ return ring_buf && (ring_buf->allocation != NULL) && head_in_range && tail_in_range && valid_head_tail &&
+ (ring_buf->allocator != NULL);
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_RING_BUFFER_INL */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h b/contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h
index 64863d2c28..01c257dfba 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/rw_lock.h
@@ -54,6 +54,7 @@ AWS_COMMON_API int aws_rw_lock_wlock(struct aws_rw_lock *lock);
* Attempts to acquire the lock but returns immediately if it can not.
* While on some platforms such as Windows, this may behave as a reentrant mutex,
 * you should not treat it like one. On platforms where it is possible for it to be non-reentrant, it will be.
+ * Note: on Windows, the minimum supported server version is Windows Server 2008 R2 [desktop apps | UWP apps].
*/
AWS_COMMON_API int aws_rw_lock_try_rlock(struct aws_rw_lock *lock);
AWS_COMMON_API int aws_rw_lock_try_wlock(struct aws_rw_lock *lock);
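
A small sketch of the try-lock pattern the comment above describes, assuming aws_rw_lock_try_rlock() returns AWS_OP_SUCCESS when the lock was acquired; `struct shared_state` is a hypothetical user type.

    #include <aws/common/rw_lock.h>

    struct shared_state; /* hypothetical user type */

    static bool s_try_read(struct aws_rw_lock *lock, const struct shared_state *state) {
        if (aws_rw_lock_try_rlock(lock) != AWS_OP_SUCCESS) {
            return false; /* lock contended; caller can retry later */
        }
        (void)state; /* ... read from state under the read lock ... */
        aws_rw_lock_runlock(lock);
        return true;
    }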
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/stdint.h b/contrib/restricted/aws/aws-c-common/include/aws/common/stdint.h
index 8249684710..6775960f0b 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/stdint.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/stdint.h
@@ -85,16 +85,9 @@ AWS_STATIC_ASSERT(sizeof(intptr_t) == sizeof(void *));
AWS_STATIC_ASSERT(sizeof(char) == 1);
#endif /* NO_STDINT */
-#if defined(_MSC_VER)
+/**
+ * @deprecated Use int64_t instead for offsets in public APIs.
+ */
typedef int64_t aws_off_t;
-#else
-# if _FILE_OFFSET_BITS == 64 || _POSIX_C_SOURCE >= 200112L
-typedef off_t aws_off_t;
-# else
-typedef long aws_off_t;
-# endif /* _FILE_OFFSET_BITS == 64 || _POSIX_C_SOURCE >= 200112L */
-#endif /* defined(_MSC_VER) */
-
-AWS_STATIC_ASSERT(sizeof(int64_t) >= sizeof(aws_off_t));
#endif /* AWS_COMMON_STDINT_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/string.h b/contrib/restricted/aws/aws-c-common/include/aws/common/string.h
index 58eba5baf7..c73a24ad4a 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/string.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/string.h
@@ -40,16 +40,133 @@
#endif
struct aws_string {
struct aws_allocator *const allocator;
+ /* size in bytes of `bytes` minus any null terminator.
+ * NOTE: This is not the number of characters in the string. */
const size_t len;
/* give this a storage specifier for C++ purposes. It will likely be larger after init. */
const uint8_t bytes[1];
};
+
+#ifdef AWS_OS_WINDOWS
+struct aws_wstring {
+ struct aws_allocator *const allocator;
+ /* number of characters in the string not including the null terminator. */
+ const size_t len;
+ /* give this a storage specifier for C++ purposes. It will likely be larger after init. */
+ const wchar_t bytes[1];
+};
+#endif /* AWS_OS_WINDOWS */
+
#ifdef _MSC_VER
# pragma warning(pop)
#endif
AWS_EXTERN_C_BEGIN
+#ifdef AWS_OS_WINDOWS
+/**
+ * For Windows only. Converts `to_convert` to the Windows wchar format (UTF-16) for use with Windows OS interop.
+ *
+ * Note: `to_convert` is assumed to be UTF-8 or ASCII.
+ *
+ * Returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_wstring *aws_string_convert_to_wstring(
+ struct aws_allocator *allocator,
+ const struct aws_string *to_convert);
+
+/**
+ * For Windows only. Converts `to_convert` to the Windows wchar format (UTF-16) for use with Windows OS interop.
+ *
+ * Note: `to_convert` is assumed to be UTF-8 or ASCII.
+ *
+ * Returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_wstring *aws_string_convert_to_wchar_from_byte_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *to_convert);
+
+/**
+ * Cleans up str.
+ */
+AWS_COMMON_API
+void aws_wstring_destroy(struct aws_wstring *str);
+
+/**
+ * For Windows only. Converts `to_convert` from the Windows wchar format (UTF-16) to UTF-8.
+ *
+ * Note: `to_convert` is assumed to be wchar already.
+ *
+ * Returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_string *aws_string_convert_from_wchar_str(
+ struct aws_allocator *allocator,
+ const struct aws_wstring *to_convert);
+
+/**
+ * For Windows only. Converts `to_convert` from the Windows wchar format (UTF-16) to UTF-8.
+ *
+ * Note: `to_convert` is assumed to be wchar already.
+ *
+ * Returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_string *aws_string_convert_from_wchar_byte_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *to_convert);
+
+/**
+ * For Windows only. Converts `to_convert` from the Windows wchar format (UTF-16) to UTF-8.
+ *
+ * Note: `to_convert` is assumed to be wchar already.
+ *
+ * Returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_string *aws_string_convert_from_wchar_c_str(
+ struct aws_allocator *allocator,
+ const wchar_t *to_convert);
+
+/**
+ * Create a new wide string from a byte cursor. This assumes that w_str_cur is already UTF-16.
+ *
+ * Returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_wstring *aws_wstring_new_from_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *w_str_cur);
+
+/**
+ * Create a new wide string from a UTF-16 array. The length argument is the number of characters,
+ * not counting the null terminator.
+ *
+ * Returns NULL on failure.
+ */
+AWS_COMMON_API struct aws_wstring *aws_wstring_new_from_array(
+ struct aws_allocator *allocator,
+ const wchar_t *w_str,
+ size_t length);
+
+/**
+ * Returns a wchar_t * pointer for use with windows OS interop.
+ */
+AWS_COMMON_API const wchar_t *aws_wstring_c_str(const struct aws_wstring *str);
+
+/**
+ * Returns the number of characters in the wchar string. NOTE: This is not the length in bytes or the buffer size.
+ */
+AWS_COMMON_API size_t aws_wstring_num_chars(const struct aws_wstring *str);
+
+/**
+ * Returns the length in bytes for the buffer.
+ */
+AWS_COMMON_API size_t aws_wstring_size_bytes(const struct aws_wstring *str);
+
+/**
+ * Verifies that str is a valid string. Returns true if it's valid and false otherwise.
+ */
+AWS_COMMON_API bool aws_wstring_is_valid(const struct aws_wstring *str);
+
+#endif /* AWS_OS_WINDOWS */
+
/**
* Returns true if bytes of string are the same, false otherwise.
*/
@@ -212,7 +329,7 @@ struct aws_byte_cursor aws_byte_cursor_from_string(const struct aws_string *src)
AWS_COMMON_API
struct aws_string *aws_string_clone_or_reuse(struct aws_allocator *allocator, const struct aws_string *str);
-/* Computes the length of a c string in bytes assuming the character set is either ASCII or UTF-8. If no NULL character
+/** Computes the length of a C string in bytes assuming the character set is either ASCII or UTF-8. If no NULL character
* is found within max_read_len of str, AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED is raised. Otherwise, str_len
* will contain the string length minus the NULL character, and AWS_OP_SUCCESS will be returned. */
AWS_COMMON_API
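
A minimal sketch of the UTF-8/UTF-16 round trip these declarations enable on Windows. CreateFileW is mentioned only as an illustrative Win32 call, and error handling is abbreviated.

    #include <aws/common/string.h>

    #ifdef AWS_OS_WINDOWS
    static struct aws_string *s_roundtrip(struct aws_allocator *alloc, const struct aws_string *utf8) {
        struct aws_wstring *wide = aws_string_convert_to_wstring(alloc, utf8);
        if (!wide) {
            return NULL;
        }
        /* aws_wstring_c_str(wide) is now suitable for wide Win32 APIs such as CreateFileW */
        struct aws_string *back = aws_string_convert_from_wchar_str(alloc, wide);
        aws_wstring_destroy(wide);
        return back; /* NULL if the reverse conversion failed */
    }
    #endif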
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h b/contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h
index 4143fed56b..5b6600e939 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/system_info.h
@@ -14,6 +14,11 @@ enum aws_platform_os {
AWS_PLATFORM_OS_UNIX,
};
+struct aws_cpu_info {
+ int32_t cpu_id;
+ bool suspected_hyper_thread;
+};
+
AWS_EXTERN_C_BEGIN
/* Returns the OS this was built under */
@@ -24,6 +29,25 @@ enum aws_platform_os aws_get_platform_build_os(void);
AWS_COMMON_API
size_t aws_system_info_processor_count(void);
+/**
+ * Returns the number of logical processor groupings on the system (e.g. one per NUMA node).
+ */
+AWS_COMMON_API
+uint16_t aws_get_cpu_group_count(void);
+
+/**
+ * For a group, returns the number of CPUs it contains.
+ */
+AWS_COMMON_API
+size_t aws_get_cpu_count_for_group(uint16_t group_idx);
+
+/**
+ * Fills in cpu_ids_array with the cpu_id values for the group. To obtain the size to allocate for cpu_ids_array
+ * and the value to pass for cpu_ids_array_length, call aws_get_cpu_count_for_group().
+ */
+AWS_COMMON_API
+void aws_get_cpu_ids_for_group(uint16_t group_idx, struct aws_cpu_info *cpu_ids_array, size_t cpu_ids_array_length);
+
/* Returns true if a debugger is currently attached to the process. */
AWS_COMMON_API
bool aws_is_debugger_present(void);
@@ -74,7 +98,7 @@ void aws_backtrace_print(FILE *fp, void *call_site_data);
/* Log the callstack from the current stack to the currently configured aws_logger */
AWS_COMMON_API
-void aws_backtrace_log(void);
+void aws_backtrace_log(int log_level);
AWS_EXTERN_C_END
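
A sketch tying the three new CPU-group calls together. Note that with this commit's allocator changes, aws_mem_calloc() aborts on OOM, so its result needs no NULL check.

    #include <aws/common/system_info.h>

    static void s_walk_cpu_topology(struct aws_allocator *alloc) {
        uint16_t group_count = aws_get_cpu_group_count();
        for (uint16_t group = 0; group < group_count; ++group) {
            size_t cpu_count = aws_get_cpu_count_for_group(group);
            struct aws_cpu_info *cpus = aws_mem_calloc(alloc, cpu_count, sizeof(struct aws_cpu_info));
            aws_get_cpu_ids_for_group(group, cpus, cpu_count);
            for (size_t i = 0; i < cpu_count; ++i) {
                /* use cpus[i].cpu_id and cpus[i].suspected_hyper_thread, e.g. for pinning */
            }
            aws_mem_release(alloc, cpus);
        }
    }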
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/task_scheduler.h b/contrib/restricted/aws/aws-c-common/include/aws/common/task_scheduler.h
index 1c78fd3e51..24a5cc60d4 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/task_scheduler.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/task_scheduler.h
@@ -33,7 +33,12 @@ struct aws_task {
struct aws_linked_list_node node;
struct aws_priority_queue_node priority_queue_node;
const char *type_tag;
- size_t reserved;
+
+ /* union preserves ABI compatibility with the old `reserved` field */
+ union {
+ bool scheduled;
+ size_t reserved;
+ } abi_extension;
};
struct aws_task_scheduler {
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/thread.h b/contrib/restricted/aws/aws-c-common/include/aws/common/thread.h
index e7abd79f7e..49e5241748 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/thread.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/thread.h
@@ -15,6 +15,41 @@ enum aws_thread_detach_state {
AWS_THREAD_NOT_CREATED = 1,
AWS_THREAD_JOINABLE,
AWS_THREAD_JOIN_COMPLETED,
+ AWS_THREAD_MANAGED,
+};
+
+/**
+ * Specifies the join strategy used on an aws_thread, which in turn controls whether or not a thread participates
+ * in the managed thread system. The managed thread system provides logic to guarantee a join on all participating
+ * threads at the cost of laziness (the user cannot control when joins happen).
+ *
+ * Manual - the thread does not participate in the managed thread system; any joins must be done by the user. This
+ * is the default. The user must call aws_thread_clean_up(), but only after any desired join operation has completed.
+ * Not doing so will cause the Windows thread handle to leak.
+ *
+ * Managed - the managed thread system will automatically perform a join some time after the thread's run function
+ * has completed. It is an error to call aws_thread_join on a thread configured with the managed join strategy. The
+ * managed thread system will call aws_thread_clean_up() on the thread after the background join has completed.
+ *
+ * Additionally, an API exists, aws_thread_join_all_managed(), which blocks and returns when all outstanding threads
+ * with the managed strategy have fully joined. This API is useful for tests (rather than waiting for many individual
+ * signals) and program shutdown or DLL unload. This API is automatically invoked by the common library clean up
+ * function. If the common library clean up is called from a managed thread, this will cause deadlock.
+ *
+ * Lazy thread joining is done only when threads finish their run function or when the user calls
+ * aws_thread_join_all_managed(). This means it may be a long time between thread function completion and the join
+ * being applied, but the queue of unjoined threads is always one or fewer so there is no critical resource
+ * backlog.
+ *
+ * Currently, only event loop group async cleanup and host resolver threads participate in the managed thread system.
+ * Additionally, event loop threads increment and decrement the pending join count (they are joined manually,
+ * internally) so that the system has an accurate view of internal thread usage, and so that an event loop group
+ * that has not been fully released keeps aws_thread_join_all_managed() blocked rather than letting it run to
+ * completion prematurely.
+ */
+enum aws_thread_join_strategy {
+ AWS_TJS_MANUAL = 0,
+ AWS_TJS_MANAGED,
};
struct aws_thread_options {
@@ -30,6 +65,8 @@ struct aws_thread_options {
* On Apple and Android platforms, this setting doesn't do anything at all.
*/
int32_t cpu_id;
+
+ enum aws_thread_join_strategy join_strategy;
};
#ifdef _WIN32
@@ -81,7 +118,11 @@ int aws_thread_init(struct aws_thread *thread, struct aws_allocator *allocator);
/**
* Creates an OS level thread and associates it with func. context will be passed to func when it is executed.
* options will be applied to the thread if they are applicable for the platform.
- * You must either call join or detach after creating the thread and before calling clean_up.
+ *
+ * After launch, you may join on the thread. A successfully launched thread must have clean_up called on it in order
+ * to avoid a handle leak. If you do not join before calling clean_up, the thread will become detached.
+ *
+ * Managed threads must not have join or clean_up called on them by external code.
*/
AWS_COMMON_API
int aws_thread_launch(
@@ -105,13 +146,31 @@ enum aws_thread_detach_state aws_thread_get_detach_state(struct aws_thread *thre
/**
* Joins the calling thread to a thread instance. Returns when thread is
- * finished.
+ * finished. Calling this from the associated OS thread will cause a deadlock.
*/
AWS_COMMON_API
int aws_thread_join(struct aws_thread *thread);
/**
- * Cleans up the thread handle. Either detach or join must be called
+ * Blocking call that waits for all managed threads to complete their join call. This can only be called
+ * from the main thread or a non-managed thread.
+ *
+ * This gets called automatically from library cleanup.
+ *
+ * By default the wait is unbounded, but that default can be overridden via aws_thread_set_managed_join_timeout_ns()
+ */
+AWS_COMMON_API
+int aws_thread_join_all_managed(void);
+
+/**
+ * Overrides how long, in nanoseconds, aws_thread_join_all_managed() will wait for threads to complete.
+ * A value of zero will result in an unbounded wait.
+ */
+AWS_COMMON_API
+void aws_thread_set_managed_join_timeout_ns(uint64_t timeout_in_ns);
+
+/**
+ * Cleans up the thread handle. Don't call this on a managed thread. If you wish to join the thread, you must join
* before calling this function.
*/
AWS_COMMON_API
@@ -146,6 +205,24 @@ typedef void(aws_thread_atexit_fn)(void *user_data);
AWS_COMMON_API
int aws_thread_current_at_exit(aws_thread_atexit_fn *callback, void *user_data);
+/**
+ * Increments the count of unjoined threads in the managed thread system. Used by managed threads and
+ * event loop threads. Additional usage requires the user to join corresponding threads themselves and
+ * correctly increment/decrement even in the face of launch/join errors.
+ *
+ * aws_thread_join_all_managed() will not return until this count has gone to zero.
+ */
+AWS_COMMON_API void aws_thread_increment_unjoined_count(void);
+
+/**
+ * Decrements the count of unjoined threads in the managed thread system. Used by managed threads and
+ * event loop threads. Additional usage requires the user to join corresponding threads themselves and
+ * correctly increment/decrement even in the face of launch/join errors.
+ *
+ * aws_thread_join_all_managed() will not return until this count has gone to zero.
+ */
+AWS_COMMON_API void aws_thread_decrement_unjoined_count(void);
+
AWS_EXTERN_C_END
#endif /* AWS_COMMON_THREAD_H */
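
A sketch of launching a managed thread per the strategy documented above. The aws_thread struct is assumed to outlive the thread (e.g. embedded in an owning object).

    #include <aws/common/thread.h>

    static void s_run(void *user_data) {
        (void)user_data; /* ... background work ... */
    }

    static int s_spawn_managed(struct aws_thread *thread, struct aws_allocator *alloc) {
        if (aws_thread_init(thread, alloc)) {
            return AWS_OP_ERR;
        }
        struct aws_thread_options options = *aws_default_thread_options();
        options.join_strategy = AWS_TJS_MANAGED;
        /* never call aws_thread_join()/aws_thread_clean_up() on this thread;
         * aws_thread_join_all_managed() (run by library cleanup) reaps it */
        return aws_thread_launch(thread, s_run, NULL, &options);
    }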
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/thread_scheduler.h b/contrib/restricted/aws/aws-c-common/include/aws/common/thread_scheduler.h
new file mode 100644
index 0000000000..5457aa2d7b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/thread_scheduler.h
@@ -0,0 +1,60 @@
+#ifndef AWS_COMMON_THREAD_SCHEDULER_H
+#define AWS_COMMON_THREAD_SCHEDULER_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/common.h>
+
+struct aws_thread_scheduler;
+struct aws_thread_options;
+struct aws_task;
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Creates a new instance of a thread scheduler. This object receives scheduled tasks and executes them inside a
+ * background thread. On success, this function returns an instance with a ref-count of 1. On failure it returns NULL.
+ *
+ * thread_options are optional.
+ *
+ * The semantics of this interface conform to the semantics of aws_task_scheduler.
+ */
+AWS_COMMON_API
+struct aws_thread_scheduler *aws_thread_scheduler_new(
+ struct aws_allocator *allocator,
+ const struct aws_thread_options *thread_options);
+
+/**
+ * Acquire a reference to the scheduler.
+ */
+AWS_COMMON_API void aws_thread_scheduler_acquire(struct aws_thread_scheduler *scheduler);
+
+/**
+ * Release a reference to the scheduler.
+ */
+AWS_COMMON_API void aws_thread_scheduler_release(const struct aws_thread_scheduler *scheduler);
+
+/**
+ * Schedules a task to run in the future. time_to_run is the absolute time, as reported by the system hardware clock.
+ */
+AWS_COMMON_API void aws_thread_scheduler_schedule_future(
+ struct aws_thread_scheduler *scheduler,
+ struct aws_task *task,
+ uint64_t time_to_run);
+
+/**
+ * Schedules a task to run as soon as possible.
+ */
+AWS_COMMON_API void aws_thread_scheduler_schedule_now(struct aws_thread_scheduler *scheduler, struct aws_task *task);
+
+/**
+ * Cancel a task that has been scheduled. The cancellation callback will be invoked in the background thread.
+ * This function is slow; avoid calling it on a hot code path.
+ */
+AWS_COMMON_API void aws_thread_scheduler_cancel_task(struct aws_thread_scheduler *scheduler, struct aws_task *task);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_COMMON_THREAD_SCHEDULER_H */
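
A sketch of scheduling a one-shot task one second out. aws_task_init(), the aws_task_fn signature, and aws_high_res_clock_get_ticks() are assumed from task_scheduler.h and clock.h.

    #include <aws/common/clock.h>
    #include <aws/common/task_scheduler.h>
    #include <aws/common/thread_scheduler.h>

    static void s_on_task(struct aws_task *task, void *arg, enum aws_task_status status) {
        (void)task;
        (void)arg;
        if (status == AWS_TASK_STATUS_RUN_READY) {
            /* runs on the scheduler's background thread */
        } /* AWS_TASK_STATUS_CANCELED: release resources only */
    }

    static void s_schedule_one_second_out(struct aws_thread_scheduler *scheduler, struct aws_task *task) {
        aws_task_init(task, s_on_task, NULL, "example");
        uint64_t now = 0;
        aws_high_res_clock_get_ticks(&now);
        aws_thread_scheduler_schedule_future(scheduler, task, now + 1000000000ULL /* 1s in ns */);
    }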
diff --git a/contrib/restricted/aws/aws-c-common/source/allocator.c b/contrib/restricted/aws/aws-c-common/source/allocator.c
index 6ffb531509..a672662470 100644
--- a/contrib/restricted/aws/aws-c-common/source/allocator.c
+++ b/contrib/restricted/aws/aws-c-common/source/allocator.c
@@ -25,6 +25,10 @@
# pragma warning(disable : 4100)
#endif
+#ifndef PAGE_SIZE
+# define PAGE_SIZE (4 * 1024)
+#endif
+
bool aws_allocator_is_valid(const struct aws_allocator *alloc) {
/* An allocator must define mem_acquire and mem_release. All other fields are optional */
return alloc && AWS_OBJECT_PTR_IS_READABLE(alloc) && alloc->mem_acquire && alloc->mem_release;
@@ -32,23 +36,74 @@ bool aws_allocator_is_valid(const struct aws_allocator *alloc) {
static void *s_default_malloc(struct aws_allocator *allocator, size_t size) {
(void)allocator;
- return malloc(size);
+ /* Larger allocations should be aligned so that AVX and friends can avoid
+ * the extra preamble during unaligned versions of memcpy/memset on big buffers.
+ * This will also accelerate hardware CRC and SHA on ARM chips.
+ *
+ * 64 byte alignment for > page allocations on 64 bit systems
+ * 32 byte alignment for > page allocations on 32 bit systems
+ * 16 byte alignment for <= page allocations on 64 bit systems
+ * 8 byte alignment for <= page allocations on 32 bit systems
+ *
+ * We use PAGE_SIZE as the boundary because we are not aware of any allocations of
+ * this size or greater that are not data buffers
+ */
+ const size_t alignment = sizeof(void *) * (size > PAGE_SIZE ? 8 : 2);
+#if !defined(_WIN32)
+ void *result = NULL;
+ int err = posix_memalign(&result, alignment, size);
+ (void)err;
+ AWS_PANIC_OOM(result, "posix_memalign failed to allocate memory");
+ return result;
+#else
+ void *mem = _aligned_malloc(size, alignment);
+ AWS_FATAL_POSTCONDITION(mem && "_aligned_malloc failed to allocate memory");
+ return mem;
+#endif
}
static void s_default_free(struct aws_allocator *allocator, void *ptr) {
(void)allocator;
+#if !defined(_WIN32)
free(ptr);
+#else
+ _aligned_free(ptr);
+#endif
}
static void *s_default_realloc(struct aws_allocator *allocator, void *ptr, size_t oldsize, size_t newsize) {
(void)allocator;
(void)oldsize;
- return realloc(ptr, newsize);
+ AWS_FATAL_PRECONDITION(newsize);
+
+#if !defined(_WIN32)
+ if (newsize <= oldsize) {
+ return ptr;
+ }
+
+ /* newsize is > oldsize, need more memory */
+ void *new_mem = s_default_malloc(allocator, newsize);
+ AWS_PANIC_OOM(new_mem, "Unhandled OOM encountered in s_default_malloc");
+
+ if (ptr) {
+ memcpy(new_mem, ptr, oldsize);
+ s_default_free(allocator, ptr);
+ }
+
+ return new_mem;
+#else
+ const size_t alignment = sizeof(void *) * (newsize > PAGE_SIZE ? 8 : 2);
+ void *new_mem = _aligned_realloc(ptr, newsize, alignment);
+ AWS_PANIC_OOM(new_mem, "Unhandled OOM encountered in _aligned_realloc");
+ return new_mem;
+#endif
}
static void *s_default_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
- (void)allocator;
- return calloc(num, size);
+ void *mem = s_default_malloc(allocator, num * size);
+ AWS_PANIC_OOM(mem, "Unhandled OOM encountered in s_default_malloc");
+ memset(mem, 0, num * size);
+ return mem;
}
static struct aws_allocator default_allocator = {
@@ -69,9 +124,8 @@ void *aws_mem_acquire(struct aws_allocator *allocator, size_t size) {
AWS_FATAL_PRECONDITION(size != 0);
void *mem = allocator->mem_acquire(allocator, size);
- if (!mem) {
- aws_raise_error(AWS_ERROR_OOM);
- }
+ AWS_PANIC_OOM(mem, "Unhandled OOM encountered in aws_mem_acquire with allocator");
+
return mem;
}
@@ -84,28 +138,21 @@ void *aws_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
/* Defensive check: never use calloc with size * num that would overflow
* https://wiki.sei.cmu.edu/confluence/display/c/MEM07-C.+Ensure+that+the+arguments+to+calloc%28%29%2C+when+multiplied%2C+do+not+wrap
*/
- size_t required_bytes;
- if (aws_mul_size_checked(num, size, &required_bytes)) {
- return NULL;
- }
+ size_t required_bytes = 0;
+ AWS_FATAL_POSTCONDITION(!aws_mul_size_checked(num, size, &required_bytes), "calloc computed size > SIZE_MAX");
/* If there is a defined calloc, use it */
if (allocator->mem_calloc) {
void *mem = allocator->mem_calloc(allocator, num, size);
- if (!mem) {
- aws_raise_error(AWS_ERROR_OOM);
- }
+ AWS_PANIC_OOM(mem, "Unhandled OOM encountered in aws_mem_acquire with allocator");
return mem;
}
/* Otherwise, emulate calloc */
void *mem = allocator->mem_acquire(allocator, required_bytes);
- if (!mem) {
- aws_raise_error(AWS_ERROR_OOM);
- return NULL;
- }
+ AWS_PANIC_OOM(mem, "Unhandled OOM encountered in aws_mem_acquire with allocator");
+
memset(mem, 0, required_bytes);
- AWS_POSTCONDITION(mem != NULL);
return mem;
}
@@ -136,10 +183,7 @@ void *aws_mem_acquire_many(struct aws_allocator *allocator, size_t count, ...) {
if (total_size > 0) {
allocation = aws_mem_acquire(allocator, total_size);
- if (!allocation) {
- aws_raise_error(AWS_ERROR_OOM);
- goto cleanup;
- }
+ AWS_PANIC_OOM(allocation, "Unhandled OOM encountered in aws_mem_acquire with allocator");
uint8_t *current_ptr = allocation;
@@ -155,7 +199,6 @@ void *aws_mem_acquire_many(struct aws_allocator *allocator, size_t count, ...) {
}
}
-cleanup:
va_end(args_allocs);
return allocation;
}
@@ -185,9 +228,8 @@ int aws_mem_realloc(struct aws_allocator *allocator, void **ptr, size_t oldsize,
if (allocator->mem_realloc) {
void *newptr = allocator->mem_realloc(allocator, *ptr, oldsize, newsize);
- if (!newptr) {
- return aws_raise_error(AWS_ERROR_OOM);
- }
+ AWS_PANIC_OOM(newptr, "Unhandled OOM encountered in aws_mem_acquire with allocator");
+
*ptr = newptr;
return AWS_OP_SUCCESS;
}
@@ -198,9 +240,7 @@ int aws_mem_realloc(struct aws_allocator *allocator, void **ptr, size_t oldsize,
}
void *newptr = allocator->mem_acquire(allocator, newsize);
- if (!newptr) {
- return aws_raise_error(AWS_ERROR_OOM);
- }
+ AWS_PANIC_OOM(newptr, "Unhandled OOM encountered in aws_mem_acquire with allocator");
memcpy(newptr, *ptr, oldsize);
memset((uint8_t *)newptr + oldsize, 0, newsize - oldsize);
@@ -225,10 +265,6 @@ static void *s_cf_allocator_allocate(CFIndex alloc_size, CFOptionFlags hint, voi
void *mem = aws_mem_acquire(allocator, (size_t)alloc_size + sizeof(size_t));
- if (!mem) {
- return NULL;
- }
-
size_t allocation_size = (size_t)alloc_size + sizeof(size_t);
memcpy(mem, &allocation_size, sizeof(size_t));
return (void *)((uint8_t *)mem + sizeof(size_t));
@@ -252,9 +288,7 @@ static void *s_cf_allocator_reallocate(void *ptr, CFIndex new_size, CFOptionFlag
size_t original_size = 0;
memcpy(&original_size, original_allocation, sizeof(size_t));
- if (aws_mem_realloc(allocator, &original_allocation, original_size, (size_t)new_size)) {
- return NULL;
- }
+ aws_mem_realloc(allocator, &original_allocation, original_size, (size_t)new_size);
size_t new_allocation_size = (size_t)new_size;
memcpy(original_allocation, &new_allocation_size, sizeof(size_t));
@@ -298,9 +332,7 @@ CFAllocatorRef aws_wrapped_cf_allocator_new(struct aws_allocator *allocator) {
cf_allocator = CFAllocatorCreate(NULL, &context);
- if (!cf_allocator) {
- aws_raise_error(AWS_ERROR_OOM);
- }
+ AWS_FATAL_ASSERT(cf_allocator && "creation of cf allocator failed!");
return cf_allocator;
}
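
This hunk changes the OOM contract: allocation failure now panics (AWS_PANIC_OOM) instead of raising AWS_ERROR_OOM, so call sites no longer need NULL checks. A minimal sketch; `struct widget` is hypothetical.

    #include <aws/common/common.h>

    struct widget { /* hypothetical */
        int id;
    };

    static struct widget *s_widget_new(struct aws_allocator *alloc) {
        /* either returns zeroed memory or aborts the process; never NULL */
        struct widget *w = aws_mem_calloc(alloc, 1, sizeof(struct widget));
        w->id = 42;
        return w;
    }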
diff --git a/contrib/restricted/aws/aws-c-common/source/allocator_sba.c b/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
index d30c67c37e..47f080acad 100644
--- a/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
+++ b/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
@@ -186,11 +186,15 @@ static void s_sba_clean_up(struct small_block_allocator *sba) {
for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) {
void *page_addr = NULL;
aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx);
- s_aligned_free(page_addr);
+ struct page_header *page = page_addr;
+ AWS_ASSERT(page->alloc_count == 0 && "Memory still allocated in aws_sba_allocator (bin)");
+ s_aligned_free(page);
}
if (bin->page_cursor) {
void *page_addr = s_page_base(bin->page_cursor);
- s_aligned_free(page_addr);
+ struct page_header *page = page_addr;
+ AWS_ASSERT(page->alloc_count == 0 && "Memory still allocated in aws_sba_allocator (page)");
+ s_aligned_free(page);
}
aws_array_list_clean_up(&bin->active_pages);
@@ -238,6 +242,53 @@ void aws_small_block_allocator_destroy(struct aws_allocator *sba_allocator) {
aws_mem_release(allocator, sba);
}
+size_t aws_small_block_allocator_bytes_active(struct aws_allocator *sba_allocator) {
+ AWS_FATAL_ASSERT(sba_allocator && "aws_small_block_allocator_bytes_used requires a non-null allocator");
+ struct small_block_allocator *sba = sba_allocator->impl;
+ AWS_FATAL_ASSERT(sba && "aws_small_block_allocator_bytes_used: supplied allocator has invalid SBA impl");
+
+ size_t used = 0;
+ for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
+ struct sba_bin *bin = &sba->bins[idx];
+ sba->lock(&bin->mutex);
+ for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) {
+ void *page_addr = NULL;
+ aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx);
+ struct page_header *page = page_addr;
+ used += page->alloc_count * bin->size;
+ }
+ if (bin->page_cursor) {
+ void *page_addr = s_page_base(bin->page_cursor);
+ struct page_header *page = page_addr;
+ used += page->alloc_count * bin->size;
+ }
+ sba->unlock(&bin->mutex);
+ }
+
+ return used;
+}
+
+size_t aws_small_block_allocator_bytes_reserved(struct aws_allocator *sba_allocator) {
+ AWS_FATAL_ASSERT(sba_allocator && "aws_small_block_allocator_bytes_used requires a non-null allocator");
+ struct small_block_allocator *sba = sba_allocator->impl;
+ AWS_FATAL_ASSERT(sba && "aws_small_block_allocator_bytes_used: supplied allocator has invalid SBA impl");
+
+ size_t used = 0;
+ for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
+ struct sba_bin *bin = &sba->bins[idx];
+ sba->lock(&bin->mutex);
+ used += (bin->active_pages.length + (bin->page_cursor != NULL)) * AWS_SBA_PAGE_SIZE;
+ sba->unlock(&bin->mutex);
+ }
+
+ return used;
+}
+
+size_t aws_small_block_allocator_page_size(struct aws_allocator *sba_allocator) {
+ (void)sba_allocator;
+ return AWS_SBA_PAGE_SIZE;
+}
+
/* NOTE: Expects the mutex to be held by the caller */
static void *s_sba_alloc_from_bin(struct sba_bin *bin) {
/* check the free list, hand chunks out in FIFO order */
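
A sketch exercising the new SBA metrics; aws_small_block_allocator_new() is assumed from allocator.h.

    #include <aws/common/allocator.h>

    static void s_report_sba_usage(struct aws_allocator *base) {
        struct aws_allocator *sba = aws_small_block_allocator_new(base, true /* multi-threaded */);
        void *p = aws_mem_acquire(sba, 64);

        size_t active = aws_small_block_allocator_bytes_active(sba);     /* bytes handed out */
        size_t reserved = aws_small_block_allocator_bytes_reserved(sba); /* whole pages held */
        (void)active;
        (void)reserved;

        aws_mem_release(sba, p);
        aws_small_block_allocator_destroy(sba);
    }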
diff --git a/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c b/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c
index 6385c146fb..ffc6e0d4c9 100644
--- a/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c
+++ b/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c
@@ -51,14 +51,36 @@ static bool s_has_sse42(void) {
static bool s_has_avx2(void) {
uint32_t abcd[4];
- uint32_t avx2_bmi12_mask = (1 << 5) | (1 << 3) | (1 << 8);
- /* CPUID.(EAX=01H, ECX=0H):ECX.FMA[bit 12]==1 &&
- CPUID.(EAX=01H, ECX=0H):ECX.MOVBE[bit 22]==1 &&
- CPUID.(EAX=01H, ECX=0H):ECX.OSXSAVE[bit 27]==1 */
+
+ /* Check AVX2:
+ * CPUID.(EAX=07H, ECX=0H):EBX.AVX2[bit 5]==1 */
+ uint32_t avx2_mask = (1 << 5);
aws_run_cpuid(7, 0, abcd);
+ if ((abcd[1] & avx2_mask) != avx2_mask) {
+ return false;
+ }
- if ((abcd[1] & avx2_bmi12_mask) != avx2_bmi12_mask)
+ /* Also check AVX:
+ * CPUID.(EAX=01H, ECX=0H):ECX.AVX[bit 28]==1
+ *
+ * NOTE: It SHOULD be impossible for a CPU to support AVX2 without supporting AVX.
+ * But we've received crash reports where the AVX2 feature check passed
+ * and then an AVX instruction caused an "invalid instruction" crash.
+ *
+ * We diagnosed these machines by asking users to run the sample program from:
+ * https://docs.microsoft.com/en-us/cpp/intrinsics/cpuid-cpuidex?view=msvc-160
+ * and observed the following results:
+ *
+ * AVX not supported
+ * AVX2 supported
+ *
+ * We don't know for sure what was up with those machines, but this extra
+ * check should stop them from running our AVX/AVX2 code paths. */
+ uint32_t avx1_mask = (1 << 28);
+ aws_run_cpuid(1, 0, abcd);
+ if ((abcd[2] & avx1_mask) != avx1_mask) {
return false;
+ }
return true;
}
diff --git a/contrib/restricted/aws/aws-c-common/source/bus.c b/contrib/restricted/aws/aws-c-common/source/bus.c
new file mode 100644
index 0000000000..68bb29deda
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/bus.c
@@ -0,0 +1,724 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#include <aws/common/bus.h>
+
+#include <aws/common/allocator.h>
+#include <aws/common/atomics.h>
+#include <aws/common/byte_buf.h>
+#include <aws/common/condition_variable.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/linked_list.h>
+#include <aws/common/logging.h>
+#include <aws/common/mutex.h>
+#include <aws/common/thread.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable : 4204) /* nonstandard extension used: non-constant aggregate initializer */
+#endif
+
+struct aws_bus {
+ struct aws_allocator *allocator;
+
+ /* vtable and additional data structures for delivery policy */
+ void *impl;
+};
+
+/* MUST be the first member of any impl to allow blind casting */
+struct bus_vtable {
+ void (*clean_up)(struct aws_bus *bus);
+
+ int (*send)(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *));
+
+ int (*subscribe)(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *callback, void *user_data);
+
+ void (*unsubscribe)(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *callback, void *user_data);
+};
+
+/* each bound callback is stored as a bus_listener in the slots table */
+struct bus_listener {
+ struct aws_linked_list_node list_node;
+ void *user_data;
+ aws_bus_listener_fn *deliver;
+};
+
+/* value type stored in each slot in the slots table in a bus */
+struct listener_list {
+ struct aws_allocator *allocator;
+ struct aws_linked_list listeners;
+};
+
+/* find a listener list (or NULL) by address */
+static struct listener_list *bus_find_listeners(struct aws_hash_table *slots, uint64_t address) {
+ struct aws_hash_element *elem = NULL;
+ if (aws_hash_table_find(slots, (void *)(uintptr_t)address, &elem)) {
+ return NULL;
+ }
+
+ if (!elem) {
+ return NULL;
+ }
+
+ struct listener_list *list = elem->value;
+ return list;
+}
+
+/* find a listener list by address, or create/insert/return a new one */
+static struct listener_list *bus_find_or_create_listeners(
+ struct aws_allocator *allocator,
+ struct aws_hash_table *slots,
+ uint64_t address) {
+ struct listener_list *list = bus_find_listeners(slots, address);
+ if (list) {
+ return list;
+ }
+
+ list = aws_mem_calloc(allocator, 1, sizeof(struct listener_list));
+ list->allocator = allocator;
+ aws_linked_list_init(&list->listeners);
+ aws_hash_table_put(slots, (void *)(uintptr_t)address, list, NULL);
+ return list;
+}
+
+static void s_bus_deliver_msg_to_slot(
+ struct aws_bus *bus,
+ uint64_t slot,
+ uint64_t address,
+ struct aws_hash_table *slots,
+ const void *payload) {
+ (void)bus;
+ struct listener_list *list = bus_find_listeners(slots, slot);
+ if (!list) {
+ return;
+ }
+ struct aws_linked_list_node *node = aws_linked_list_begin(&list->listeners);
+ for (; node != aws_linked_list_end(&list->listeners); node = aws_linked_list_next(node)) {
+ struct bus_listener *listener = AWS_CONTAINER_OF(node, struct bus_listener, list_node);
+ listener->deliver(address, payload, listener->user_data);
+ }
+}
+
+/* common delivery logic */
+static void s_bus_deliver_msg(
+ struct aws_bus *bus,
+ uint64_t address,
+ struct aws_hash_table *slots,
+ const void *payload) {
+ s_bus_deliver_msg_to_slot(bus, AWS_BUS_ADDRESS_ALL, address, slots, payload);
+ s_bus_deliver_msg_to_slot(bus, address, address, slots, payload);
+}
+
+/* common subscribe logic */
+static int s_bus_subscribe(
+ struct aws_bus *bus,
+ uint64_t address,
+ struct aws_hash_table *slots,
+ aws_bus_listener_fn *callback,
+ void *user_data) {
+
+ if (address == AWS_BUS_ADDRESS_CLOSE) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_BUS, "Cannot directly subscribe to AWS_BUS_ADDRESS_CLOSE(0)");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct listener_list *list = bus_find_or_create_listeners(bus->allocator, slots, address);
+ struct bus_listener *listener = aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_listener));
+ listener->deliver = callback;
+ listener->user_data = user_data;
+ aws_linked_list_push_back(&list->listeners, &listener->list_node);
+
+ return AWS_OP_SUCCESS;
+}
+
+/* common unsubscribe logic */
+static void s_bus_unsubscribe(
+ struct aws_bus *bus,
+ uint64_t address,
+ struct aws_hash_table *slots,
+ aws_bus_listener_fn *callback,
+ void *user_data) {
+ (void)bus;
+
+ if (address == AWS_BUS_ADDRESS_CLOSE) {
+ AWS_LOGF_WARN(AWS_LS_COMMON_BUS, "Attempted to unsubscribe from invalid address AWS_BUS_ADDRESS_CLOSE")
+ return;
+ }
+
+ struct listener_list *list = bus_find_listeners(slots, address);
+ if (!list) {
+ return;
+ }
+
+ struct aws_linked_list_node *node;
+ for (node = aws_linked_list_begin(&list->listeners); node != aws_linked_list_end(&list->listeners);
+ node = aws_linked_list_next(node)) {
+
+ struct bus_listener *listener = AWS_CONTAINER_OF(node, struct bus_listener, list_node);
+ if (listener->deliver == callback && listener->user_data == user_data) {
+ aws_linked_list_remove(node);
+ aws_mem_release(list->allocator, listener);
+ return;
+ }
+ }
+}
+
+/* destructor for listener lists in the slots tables */
+void s_bus_destroy_listener_list(void *data) {
+ struct listener_list *list = data;
+ AWS_PRECONDITION(list->allocator);
+ /* call all listeners with an AWS_BUS_ADDRESS_CLOSE message type to clean up */
+ while (!aws_linked_list_empty(&list->listeners)) {
+ struct aws_linked_list_node *back = aws_linked_list_back(&list->listeners);
+ struct bus_listener *listener = AWS_CONTAINER_OF(back, struct bus_listener, list_node);
+ listener->deliver(AWS_BUS_ADDRESS_CLOSE, NULL, listener->user_data);
+ aws_linked_list_pop_back(&list->listeners);
+ aws_mem_release(list->allocator, listener);
+ }
+ aws_mem_release(list->allocator, list);
+}
+
+/*
+ * AWS_BUS_SYNC implementation
+ */
+struct bus_sync_impl {
+ struct bus_vtable vtable;
+ struct {
+ /* Map of address -> list of listeners */
+ struct aws_hash_table table;
+ } slots;
+};
+
+static void s_bus_sync_clean_up(struct aws_bus *bus) {
+ struct bus_sync_impl *impl = bus->impl;
+ aws_hash_table_clean_up(&impl->slots.table);
+ aws_mem_release(bus->allocator, impl);
+}
+
+static int s_bus_sync_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *)) {
+ struct bus_sync_impl *impl = bus->impl;
+ s_bus_deliver_msg(bus, address, &impl->slots.table, payload);
+ if (destructor) {
+ destructor(payload);
+ }
+ return AWS_OP_SUCCESS;
+}
+
+static int s_bus_sync_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *callback, void *user_data) {
+ struct bus_sync_impl *impl = bus->impl;
+ return s_bus_subscribe(bus, address, &impl->slots.table, callback, user_data);
+}
+
+static void s_bus_sync_unsubscribe(
+ struct aws_bus *bus,
+ uint64_t address,
+ aws_bus_listener_fn *callback,
+ void *user_data) {
+ struct bus_sync_impl *impl = bus->impl;
+ s_bus_unsubscribe(bus, address, &impl->slots.table, callback, user_data);
+}
+
+static struct bus_vtable bus_sync_vtable = {
+ .clean_up = s_bus_sync_clean_up,
+ .send = s_bus_sync_send,
+ .subscribe = s_bus_sync_subscribe,
+ .unsubscribe = s_bus_sync_unsubscribe,
+};
+
+static void s_bus_sync_init(struct aws_bus *bus, const struct aws_bus_options *options) {
+ (void)options;
+
+ struct bus_sync_impl *impl = bus->impl = aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_sync_impl));
+ impl->vtable = bus_sync_vtable;
+
+ if (aws_hash_table_init(
+ &impl->slots.table, bus->allocator, 8, aws_hash_ptr, aws_ptr_eq, NULL, s_bus_destroy_listener_list)) {
+ goto error;
+ }
+
+ return;
+
+error:
+ aws_mem_release(bus->allocator, impl);
+ bus->impl = NULL;
+}
+
+/*
+ * AWS_BUS_ASYNC implementation
+ */
+struct bus_async_impl {
+ struct bus_vtable vtable;
+ struct {
+ /* Map of address -> list of listeners */
+ struct aws_hash_table table;
+ } slots;
+
+ /* Queue of bus_messages to deliver */
+ struct {
+ struct aws_mutex mutex;
+ /* backing memory for the message free list */
+ void *buffer;
+ void *buffer_end; /* 1 past the end of buffer */
+ /* message free list */
+ struct aws_linked_list free; /* struct bus_message */
+ /* message delivery queue */
+ struct aws_linked_list msgs; /* struct bus_message */
+ /* list of pending adds/removes of listeners */
+ struct aws_linked_list subs; /* struct pending_listener */
+ } queue;
+
+ /* dispatch thread */
+ struct {
+ struct aws_thread thread;
+ struct aws_condition_variable notify;
+ bool running;
+ struct aws_atomic_var started;
+ struct aws_atomic_var exited;
+ } dispatch;
+
+ bool reliable;
+};
+
+/* represents a message in the queue on impls that queue */
+struct bus_message {
+ struct aws_linked_list_node list_node;
+ uint64_t address;
+ void *payload;
+
+ void (*destructor)(void *);
+};
+
+struct pending_listener {
+ struct aws_linked_list_node list_node;
+ uint64_t address;
+ aws_bus_listener_fn *listener;
+ void *user_data;
+ uint32_t add : 1;
+ uint32_t remove : 1;
+};
+
+static void s_bus_message_clean_up(struct bus_message *msg) {
+ if (msg->destructor) {
+ msg->destructor(msg->payload);
+ }
+ msg->destructor = NULL;
+ msg->payload = NULL;
+}
+
+/* Assumes the caller holds the lock */
+static void s_bus_async_free_message(struct aws_bus *bus, struct bus_message *msg) {
+ struct bus_async_impl *impl = bus->impl;
+ s_bus_message_clean_up(msg);
+ if ((void *)msg >= impl->queue.buffer && (void *)msg < impl->queue.buffer_end) {
+ AWS_ZERO_STRUCT(*msg);
+ aws_linked_list_push_back(&impl->queue.free, &msg->list_node);
+ return;
+ }
+ aws_mem_release(bus->allocator, msg);
+}
+
+/* Assumes the caller holds the lock */
+struct bus_message *s_bus_async_alloc_message(struct aws_bus *bus) {
+ struct bus_async_impl *impl = bus->impl;
+
+ /* try the free list first */
+ if (!aws_linked_list_empty(&impl->queue.free)) {
+ struct aws_linked_list_node *msg_node = aws_linked_list_pop_back(&impl->queue.free);
+ struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node);
+ return msg;
+ }
+
+ /* unreliable will re-use the oldest message */
+ if (!impl->reliable) {
+ struct aws_linked_list_node *msg_node = aws_linked_list_pop_front(&impl->queue.msgs);
+ struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node);
+ s_bus_async_free_message(bus, msg);
+ return s_bus_async_alloc_message(bus);
+ }
+
+ return aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_message));
+}
+
+/*
+ * resolve all adds and removes of listeners, in FIFO order
+ * NOTE: expects mutex to be held by caller
+ */
+static void s_bus_apply_listeners(struct aws_bus *bus, struct aws_linked_list *pending_subs) {
+ struct bus_async_impl *impl = bus->impl;
+ while (!aws_linked_list_empty(pending_subs)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(pending_subs);
+ struct pending_listener *listener = AWS_CONTAINER_OF(node, struct pending_listener, list_node);
+ if (listener->add) {
+ s_bus_subscribe(bus, listener->address, &impl->slots.table, listener->listener, listener->user_data);
+ } else if (listener->remove) {
+ s_bus_unsubscribe(bus, listener->address, &impl->slots.table, listener->listener, listener->user_data);
+ }
+ aws_mem_release(bus->allocator, listener);
+ }
+}
+
+static void s_bus_async_deliver_messages(struct aws_bus *bus, struct aws_linked_list *pending_msgs) {
+ struct bus_async_impl *impl = bus->impl;
+ struct aws_linked_list_node *msg_node = aws_linked_list_begin(pending_msgs);
+ for (; msg_node != aws_linked_list_end(pending_msgs); msg_node = aws_linked_list_next(msg_node)) {
+ struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node);
+ s_bus_deliver_msg(bus, msg->address, &impl->slots.table, msg->payload);
+ s_bus_message_clean_up(msg);
+ }
+
+ /* push all pending messages back on the free list */
+ aws_mutex_lock(&impl->queue.mutex);
+ {
+ while (!aws_linked_list_empty(pending_msgs)) {
+ msg_node = aws_linked_list_pop_front(pending_msgs);
+ struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node);
+ s_bus_async_free_message(bus, msg);
+ }
+ }
+ aws_mutex_unlock(&impl->queue.mutex);
+}
+
+static void s_bus_async_clean_up(struct aws_bus *bus) {
+ struct bus_async_impl *impl = bus->impl;
+
+ /* shut down delivery thread, clean up dispatch */
+ AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus: %p clean_up: starting final drain", (void *)bus);
+ aws_mutex_lock(&impl->queue.mutex);
+ impl->dispatch.running = false;
+ aws_mutex_unlock(&impl->queue.mutex);
+ aws_condition_variable_notify_one(&impl->dispatch.notify);
+ /* Spin wait for the final drain and dispatch thread to complete */
+ while (!aws_atomic_load_int(&impl->dispatch.exited)) {
+ aws_thread_current_sleep(1000 * 1000); /* 1 millisecond */
+ }
+ AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus: %p clean_up: finished final drain", (void *)bus);
+ aws_thread_join(&impl->dispatch.thread);
+ aws_thread_clean_up(&impl->dispatch.thread);
+ aws_condition_variable_clean_up(&impl->dispatch.notify);
+
+ /* should be impossible for subs or msgs to remain after final drain */
+ AWS_FATAL_ASSERT(aws_linked_list_empty(&impl->queue.msgs));
+ AWS_FATAL_ASSERT(aws_linked_list_empty(&impl->queue.subs));
+
+ /* this frees everything that the free/msgs lists point to */
+ if (impl->queue.buffer) {
+ aws_mem_release(bus->allocator, impl->queue.buffer);
+ }
+
+ aws_mutex_clean_up(&impl->queue.mutex);
+
+ aws_hash_table_clean_up(&impl->slots.table);
+ aws_mem_release(bus->allocator, impl);
+}
+
+static bool s_bus_async_should_wake_up(void *user_data) {
+ struct bus_async_impl *impl = user_data;
+ return !impl->dispatch.running || !aws_linked_list_empty(&impl->queue.subs) ||
+ !aws_linked_list_empty(&impl->queue.msgs);
+}
+
+static bool s_bus_async_is_running(struct bus_async_impl *impl) {
+ aws_mutex_lock(&impl->queue.mutex);
+ bool running = impl->dispatch.running;
+ aws_mutex_unlock(&impl->queue.mutex);
+ return running;
+}
+
+/* Async bus delivery thread loop */
+static void s_bus_async_deliver(void *user_data) {
+ struct aws_bus *bus = user_data;
+ struct bus_async_impl *impl = bus->impl;
+
+ aws_atomic_store_int(&impl->dispatch.started, 1);
+ AWS_LOGF_DEBUG(AWS_LS_COMMON_BUS, "bus %p: delivery thread loop started", (void *)bus);
+
+ /* once shutdown has been triggered, need to drain one more time to ensure all queues are empty */
+ int pending_drains = 1;
+ do {
+ struct aws_linked_list pending_msgs;
+ aws_linked_list_init(&pending_msgs);
+
+ struct aws_linked_list pending_subs;
+ aws_linked_list_init(&pending_subs);
+
+ aws_mutex_lock(&impl->queue.mutex);
+ {
+ aws_condition_variable_wait_pred(
+ &impl->dispatch.notify, &impl->queue.mutex, s_bus_async_should_wake_up, impl);
+
+ /* copy out any queued subs/unsubs */
+ aws_linked_list_swap_contents(&impl->queue.subs, &pending_subs);
+ /* copy out any queued messages */
+ aws_linked_list_swap_contents(&impl->queue.msgs, &pending_msgs);
+ }
+ aws_mutex_unlock(&impl->queue.mutex);
+
+ /* first resolve subs/unsubs */
+ if (!aws_linked_list_empty(&pending_subs)) {
+ s_bus_apply_listeners(bus, &pending_subs);
+ }
+
+ /* Then deliver queued messages */
+ if (!aws_linked_list_empty(&pending_msgs)) {
+ s_bus_async_deliver_messages(bus, &pending_msgs);
+ }
+ } while (s_bus_async_is_running(impl) || pending_drains--);
+
+ /* record that the dispatch thread is done */
+ aws_atomic_store_int(&impl->dispatch.exited, 1);
+}
+
+int s_bus_async_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *)) {
+ struct bus_async_impl *impl = bus->impl;
+
+ aws_mutex_lock(&impl->queue.mutex);
+ {
+ if (!impl->dispatch.running) {
+ AWS_LOGF_WARN(
+ AWS_LS_COMMON_BUS, "bus %p: message sent after clean_up: address: %" PRIu64 "", (void *)bus, address);
+ aws_mutex_unlock(&impl->queue.mutex);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ struct bus_message *msg = s_bus_async_alloc_message(bus);
+ msg->address = address;
+ msg->payload = payload;
+ msg->destructor = destructor;
+
+ /* push the message onto the delivery queue */
+ aws_linked_list_push_back(&impl->queue.msgs, &msg->list_node);
+ }
+ aws_mutex_unlock(&impl->queue.mutex);
+
+ /* notify the delivery thread to wake up */
+ aws_condition_variable_notify_one(&impl->dispatch.notify);
+
+ return AWS_OP_SUCCESS;
+}
+
+int s_bus_async_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) {
+ struct bus_async_impl *impl = bus->impl;
+
+ if (address == AWS_BUS_ADDRESS_CLOSE) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_BUS, "Cannot subscribe to AWS_BUS_ADDRESS_CLOSE");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ aws_mutex_lock(&impl->queue.mutex);
+ {
+ if (!impl->dispatch.running) {
+ AWS_LOGF_WARN(
+ AWS_LS_COMMON_BUS,
+ "bus %p: subscribe requested after clean_up: address: %" PRIu64 "",
+ (void *)bus,
+ address);
+ aws_mutex_unlock(&impl->queue.mutex);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ struct pending_listener *sub = aws_mem_calloc(bus->allocator, 1, sizeof(struct pending_listener));
+ sub->address = address;
+ sub->listener = listener;
+ sub->user_data = user_data;
+ sub->add = true;
+ aws_linked_list_push_back(&impl->queue.subs, &sub->list_node);
+ }
+ aws_mutex_unlock(&impl->queue.mutex);
+
+ /* notify the delivery thread to wake up */
+ aws_condition_variable_notify_one(&impl->dispatch.notify);
+ return AWS_OP_SUCCESS;
+}
+
+void s_bus_async_unsubscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) {
+ struct bus_async_impl *impl = bus->impl;
+
+ if (address == AWS_BUS_ADDRESS_CLOSE) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_BUS, "Cannot unsubscribe from AWS_BUS_ADDRESS_CLOSE");
+ return;
+ }
+
+ aws_mutex_lock(&impl->queue.mutex);
+ {
+ if (!impl->dispatch.running) {
+ AWS_LOGF_WARN(
+ AWS_LS_COMMON_BUS,
+ "bus %p: unsubscribe requested after clean_up: address: %" PRIu64 "",
+ (void *)bus,
+ address);
+ aws_mutex_unlock(&impl->queue.mutex);
+ return;
+ }
+
+ struct pending_listener *unsub = aws_mem_calloc(bus->allocator, 1, sizeof(struct pending_listener));
+ unsub->address = address;
+ unsub->listener = listener;
+ unsub->user_data = user_data;
+ unsub->remove = true;
+ aws_linked_list_push_back(&impl->queue.subs, &unsub->list_node);
+ }
+ aws_mutex_unlock(&impl->queue.mutex);
+
+ /* notify the delivery thread to wake up */
+ aws_condition_variable_notify_one(&impl->dispatch.notify);
+}
+
+static struct bus_vtable bus_async_vtable = {
+ .clean_up = s_bus_async_clean_up,
+ .send = s_bus_async_send,
+ .subscribe = s_bus_async_subscribe,
+ .unsubscribe = s_bus_async_unsubscribe,
+};
+
+static void s_bus_async_init(struct aws_bus *bus, const struct aws_bus_options *options) {
+ struct bus_async_impl *impl = bus->impl = aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_async_impl));
+ impl->vtable = bus_async_vtable;
+ impl->reliable = (options->policy == AWS_BUS_ASYNC_RELIABLE);
+
+ /* init msg queue */
+ if (aws_mutex_init(&impl->queue.mutex)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_BUS,
+ "bus %p: Unable to initialize queue synchronization: %s",
+ (void *)bus,
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+ aws_linked_list_init(&impl->queue.msgs);
+ aws_linked_list_init(&impl->queue.free);
+ aws_linked_list_init(&impl->queue.subs);
+
+ /* push as many bus_messages as we can into the free list from the buffer */
+ if (options->buffer_size) {
+ impl->queue.buffer = aws_mem_calloc(bus->allocator, 1, options->buffer_size);
+ impl->queue.buffer_end = ((uint8_t *)impl->queue.buffer) + options->buffer_size;
+ const int msg_count = (int)(options->buffer_size / sizeof(struct bus_message));
+ for (int msg_idx = 0; msg_idx < msg_count; ++msg_idx) {
+ struct bus_message *msg = (void *)&((char *)impl->queue.buffer)[msg_idx * sizeof(struct bus_message)];
+ aws_linked_list_push_back(&impl->queue.free, &msg->list_node);
+ }
+ }
+
+ /* init subscription table */
+ if (aws_hash_table_init(
+ &impl->slots.table, bus->allocator, 8, aws_hash_ptr, aws_ptr_eq, NULL, s_bus_destroy_listener_list)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_BUS,
+ "bus %p: Unable to initialize bus addressing table: %s",
+ (void *)bus,
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Setup dispatch thread */
+ if (aws_condition_variable_init(&impl->dispatch.notify)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_BUS,
+ "bus %p: Unable to initialize async notify: %s",
+ (void *)bus,
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ if (aws_thread_init(&impl->dispatch.thread, bus->allocator)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_BUS,
+ "bus %p: Unable to initialize background thread: %s",
+ (void *)bus,
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ impl->dispatch.running = true;
+ aws_atomic_init_int(&impl->dispatch.started, 0);
+ aws_atomic_init_int(&impl->dispatch.exited, 0);
+ if (aws_thread_launch(&impl->dispatch.thread, s_bus_async_deliver, bus, aws_default_thread_options())) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_BUS,
+ "bus %p: Unable to launch delivery thread: %s",
+ (void *)bus,
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* wait for dispatch thread to start before returning control */
+ AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus %p: Waiting for delivery thread to start", (void *)bus);
+ while (!aws_atomic_load_int(&impl->dispatch.started)) {
+ aws_thread_current_sleep(1000 * 1000);
+ }
+ AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus %p: Delivery thread started", (void *)bus);
+
+ return;
+
+error:
+ aws_thread_clean_up(&impl->dispatch.thread);
+ aws_condition_variable_clean_up(&impl->dispatch.notify);
+ aws_hash_table_clean_up(&impl->slots.table);
+ aws_mem_release(bus->allocator, &impl->queue.buffer);
+ aws_mutex_clean_up(&impl->queue.mutex);
+ aws_mem_release(bus->allocator, impl);
+ bus->impl = NULL;
+}
+
+/*
+ * Public API
+ */
+struct aws_bus *aws_bus_new(struct aws_allocator *allocator, const struct aws_bus_options *options) {
+ struct aws_bus *bus = aws_mem_calloc(allocator, 1, sizeof(struct aws_bus));
+ bus->allocator = allocator;
+
+ switch (options->policy) {
+ case AWS_BUS_ASYNC_RELIABLE:
+ case AWS_BUS_ASYNC_UNRELIABLE:
+ s_bus_async_init(bus, options);
+ break;
+ case AWS_BUS_SYNC_RELIABLE:
+ s_bus_sync_init(bus, options);
+ break;
+ }
+
+ if (!bus->impl) {
+ aws_mem_release(allocator, bus);
+ return NULL;
+ }
+
+ return bus;
+}
+
+void aws_bus_destroy(struct aws_bus *bus) {
+ struct bus_vtable *vtable = bus->impl;
+ vtable->clean_up(bus);
+ aws_mem_release(bus->allocator, bus);
+}
+
+int aws_bus_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) {
+ struct bus_vtable *vtable = bus->impl;
+ return vtable->subscribe(bus, address, listener, user_data);
+}
+
+void aws_bus_unsubscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) {
+ struct bus_vtable *vtable = bus->impl;
+ vtable->unsubscribe(bus, address, listener, user_data);
+}
+
+int aws_bus_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *)) {
+ struct bus_vtable *vtable = bus->impl;
+ return vtable->send(bus, address, payload, destructor);
+}
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
diff --git a/contrib/restricted/aws/aws-c-common/source/byte_buf.c b/contrib/restricted/aws/aws-c-common/source/byte_buf.c
index ca18f4121b..f52aa16b45 100644
--- a/contrib/restricted/aws/aws-c-common/source/byte_buf.c
+++ b/contrib/restricted/aws/aws-c-common/source/byte_buf.c
@@ -496,7 +496,7 @@ uint64_t aws_hash_array_ignore_case(const void *array, const size_t len) {
const uint64_t fnv_prime = 0x100000001b3ULL;
const uint8_t *i = array;
- const uint8_t *end = i + len;
+ const uint8_t *end = (i == NULL) ? NULL : (i + len);
uint64_t hash = fnv_offset_basis;
while (i != end) {
@@ -558,6 +558,42 @@ bool aws_byte_cursor_eq_c_str_ignore_case(const struct aws_byte_cursor *const cu
return rv;
}
+bool aws_byte_cursor_starts_with(const struct aws_byte_cursor *input, const struct aws_byte_cursor *prefix) {
+
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(input));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(prefix));
+
+ if (input->len < prefix->len) {
+ return false;
+ }
+
+ struct aws_byte_cursor start = {.ptr = input->ptr, .len = prefix->len};
+ bool rv = aws_byte_cursor_eq(&start, prefix);
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(input));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(prefix));
+ return rv;
+}
+
+bool aws_byte_cursor_starts_with_ignore_case(
+ const struct aws_byte_cursor *input,
+ const struct aws_byte_cursor *prefix) {
+
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(input));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(prefix));
+
+ if (input->len < prefix->len) {
+ return false;
+ }
+
+ struct aws_byte_cursor start = {.ptr = input->ptr, .len = prefix->len};
+ bool rv = aws_byte_cursor_eq_ignore_case(&start, prefix);
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(input));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(prefix));
+ return rv;
+}
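+
+/*
+ * Illustrative usage: a cursor over "Content-Type: application/json" matches the
+ * prefix "content-type" only under the case-insensitive variant:
+ *
+ *   struct aws_byte_cursor input = aws_byte_cursor_from_c_str("Content-Type: application/json");
+ *   struct aws_byte_cursor prefix = aws_byte_cursor_from_c_str("content-type");
+ *   aws_byte_cursor_starts_with(&input, &prefix);              -> false
+ *   aws_byte_cursor_starts_with_ignore_case(&input, &prefix);  -> true
+ */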
+
int aws_byte_buf_append(struct aws_byte_buf *to, const struct aws_byte_cursor *from) {
AWS_PRECONDITION(aws_byte_buf_is_valid(to));
AWS_PRECONDITION(aws_byte_cursor_is_valid(from));
@@ -750,7 +786,13 @@ int aws_byte_buf_reserve(struct aws_byte_buf *buffer, size_t requested_capacity)
AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer));
return AWS_OP_SUCCESS;
}
-
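+ /* an empty buf that was never initialized has nothing to realloc; give it a fresh allocation instead */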
+ if (!buffer->buffer && !buffer->capacity && requested_capacity > buffer->capacity) {
+ if (aws_byte_buf_init(buffer, buffer->allocator, requested_capacity)) {
+ return AWS_OP_ERR;
+ }
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer));
+ return AWS_OP_SUCCESS;
+ }
if (aws_mem_realloc(buffer->allocator, (void **)&buffer->buffer, buffer->capacity, requested_capacity)) {
return AWS_OP_ERR;
}
@@ -857,6 +899,13 @@ int aws_byte_cursor_compare_lookup(
AWS_PRECONDITION(aws_byte_cursor_is_valid(lhs));
AWS_PRECONDITION(aws_byte_cursor_is_valid(rhs));
AWS_PRECONDITION(AWS_MEM_IS_READABLE(lookup_table, 256));
+ if (lhs->len == 0 && rhs->len == 0) {
+ return 0;
+ } else if (lhs->len == 0) {
+ return -1;
+ } else if (rhs->len == 0) {
+ return 1;
+ }
const uint8_t *lhs_curr = lhs->ptr;
const uint8_t *lhs_end = lhs_curr + lhs->len;
@@ -1047,8 +1096,7 @@ struct aws_byte_cursor aws_byte_cursor_advance(struct aws_byte_cursor *const cur
} else {
rv.ptr = cursor->ptr;
rv.len = len;
-
- cursor->ptr += len;
+ cursor->ptr = (cursor->ptr == NULL) ? NULL : cursor->ptr + len;
cursor->len -= len;
}
AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor));
@@ -1089,7 +1137,7 @@ struct aws_byte_cursor aws_byte_cursor_advance_nospec(struct aws_byte_cursor *co
/* Make sure anything acting upon the returned cursor _also_ doesn't advance past NULL */
rv.len = len & mask;
- cursor->ptr += len;
+ cursor->ptr = (cursor->ptr == NULL) ? NULL : cursor->ptr + len;
cursor->len -= len;
} else {
rv.ptr = NULL;
@@ -1371,7 +1419,7 @@ bool aws_byte_buf_advance(
AWS_PRECONDITION(aws_byte_buf_is_valid(buffer));
AWS_PRECONDITION(aws_byte_buf_is_valid(output));
if (buffer->capacity - buffer->len >= len) {
- *output = aws_byte_buf_from_array(buffer->buffer + buffer->len, len);
+ *output = aws_byte_buf_from_array((buffer->buffer == NULL) ? NULL : buffer->buffer + buffer->len, len);
buffer->len += len;
output->len = 0;
AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer));
@@ -1611,18 +1659,52 @@ bool aws_isxdigit(uint8_t ch) {
bool aws_isspace(uint8_t ch) {
switch (ch) {
case 0x20: /* ' ' - space */
- return true;
case 0x09: /* '\t' - horizontal tab */
- return true;
case 0x0A: /* '\n' - line feed */
- return true;
case 0x0B: /* '\v' - vertical tab */
- return true;
case 0x0C: /* '\f' - form feed */
- return true;
case 0x0D: /* '\r' - carriage return */
return true;
default:
return false;
}
}
+
+static int s_read_unsigned(struct aws_byte_cursor cursor, uint64_t *dst, uint8_t base) {
+ uint64_t val = 0;
+ *dst = 0;
+
+ if (cursor.len == 0) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ const uint8_t *hex_to_num_table = aws_lookup_table_hex_to_num_get();
+
+ /* read from left to right */
+ for (size_t i = 0; i < cursor.len; ++i) {
+ const uint8_t c = cursor.ptr[i];
+ const uint8_t cval = hex_to_num_table[c];
+ if (cval >= base) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (aws_mul_u64_checked(val, base, &val)) {
+ return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ }
+
+ if (aws_add_u64_checked(val, cval, &val)) {
+ return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ }
+ }
+
+ *dst = val;
+ return AWS_OP_SUCCESS;
+}
+
+int aws_byte_cursor_utf8_parse_u64(struct aws_byte_cursor cursor, uint64_t *dst) {
+ return s_read_unsigned(cursor, dst, 10 /*base*/);
+}
+
+int aws_byte_cursor_utf8_parse_u64_hex(struct aws_byte_cursor cursor, uint64_t *dst) {
+ return s_read_unsigned(cursor, dst, 16 /*base*/);
+}
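+
+/*
+ * Illustrative usage: the same digits parse differently per base, and any digit
+ * at or above the base fails with AWS_ERROR_INVALID_ARGUMENT:
+ *
+ *   uint64_t v = 0;
+ *   aws_byte_cursor_utf8_parse_u64_hex(aws_byte_cursor_from_c_str("1f"), &v);  -> v == 31
+ *   aws_byte_cursor_utf8_parse_u64(aws_byte_cursor_from_c_str("1f"), &v);      -> AWS_OP_ERR
+ */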
diff --git a/contrib/restricted/aws/aws-c-common/source/command_line_parser.c b/contrib/restricted/aws/aws-c-common/source/command_line_parser.c
index ccbe6d1820..bf2db81e0a 100644
--- a/contrib/restricted/aws/aws-c-common/source/command_line_parser.c
+++ b/contrib/restricted/aws/aws-c-common/source/command_line_parser.c
@@ -2,13 +2,18 @@
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
+#include <aws/common/byte_buf.h>
#include <aws/common/command_line_parser.h>
+#include <ctype.h>
+
int aws_cli_optind = 1;
int aws_cli_opterr = -1;
int aws_cli_optopt = 0;
+bool aws_cli_on_arg = false;
const char *aws_cli_optarg = NULL;
+const char *aws_cli_positional_arg = NULL;
static const struct aws_cli_option *s_find_option_from_char(
const struct aws_cli_option *longopts,
@@ -31,6 +36,16 @@ static const struct aws_cli_option *s_find_option_from_char(
return NULL;
}
+AWS_COMMON_API void aws_cli_reset_state(void) {
+ aws_cli_optind = 1;
+ aws_cli_opterr = -1;
+ aws_cli_optopt = 0;
+ aws_cli_on_arg = false;
+
+ aws_cli_optarg = NULL;
+ aws_cli_positional_arg = NULL;
+}
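+/* Note: like classic getopt(), the parser keeps its cursor in the globals above,
+ * so aws_cli_reset_state() must be called before scanning a second argv in the
+ * same process. */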
+
static const struct aws_cli_option *s_find_option_from_c_str(
const struct aws_cli_option *longopts,
const char *search_for,
@@ -70,22 +85,35 @@ int aws_cli_getopt_long(
char second_char = argv[aws_cli_optind][1];
char *option_start = NULL;
const struct aws_cli_option *option = NULL;
+ bool positional_arg_encountered = false;
if (first_char == '-' && second_char != '-') {
+ aws_cli_on_arg = true;
+ positional_arg_encountered = false;
option_start = &argv[aws_cli_optind][1];
option = s_find_option_from_char(longopts, *option_start, longindex);
} else if (first_char == '-' && second_char == '-') {
+ aws_cli_on_arg = true;
+ positional_arg_encountered = false;
option_start = &argv[aws_cli_optind][2];
option = s_find_option_from_c_str(longopts, option_start, longindex);
} else {
- return -1;
+ if (!aws_cli_on_arg) {
+ aws_cli_positional_arg = argv[aws_cli_optind];
+ positional_arg_encountered = true;
+ } else {
+ aws_cli_on_arg = false;
+ aws_cli_positional_arg = NULL;
+ }
}
aws_cli_optind++;
if (option) {
bool has_arg = false;
+ aws_cli_on_arg = false;
+ aws_cli_positional_arg = NULL;
- char *opt_value = memchr(optstring, option->val, strlen(optstring));
+ char *opt_value = memchr(optstring, option->val, strlen(optstring) + 1);
if (!opt_value) {
return '?';
}
@@ -105,5 +133,28 @@ int aws_cli_getopt_long(
return option->val;
}
- return '?';
+ /* return STX (ASCII "start of text", 0x02) to indicate we just have a positional text argument. */
+ return positional_arg_encountered ? 0x02 : '?';
+}
+
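+/*
+ * Illustrative usage: given a dispatch table such as { { "encode", s_encode_fn } }
+ * (names hypothetical), "mytool encode -o out.bin" invokes s_encode_fn with
+ * argc/argv shifted by one, so "encode" becomes its argv[0].
+ */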
+int aws_cli_dispatch_on_subcommand(
+ int argc,
+ char *const argv[],
+ struct aws_cli_subcommand_dispatch *dispatch_table,
+ int table_length,
+ void *user_data) {
+ if (argc >= 2) {
+ struct aws_byte_cursor arg_name = aws_byte_cursor_from_c_str(argv[1]);
+ for (int i = 0; i < table_length; ++i) {
+ struct aws_byte_cursor cmd_name = aws_byte_cursor_from_c_str(dispatch_table[i].command_name);
+
+ if (aws_byte_cursor_eq_ignore_case(&arg_name, &cmd_name)) {
+ return dispatch_table[i].subcommand_fn(argc - 1, &argv[1], (const char *)arg_name.ptr, user_data);
+ }
+ }
+
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ }
+
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
diff --git a/contrib/restricted/aws/aws-c-common/source/common.c b/contrib/restricted/aws/aws-c-common/source/common.c
index 88c5d262c8..83a91b768f 100644
--- a/contrib/restricted/aws/aws-c-common/source/common.c
+++ b/contrib/restricted/aws/aws-c-common/source/common.c
@@ -4,9 +4,11 @@
*/
#include <aws/common/common.h>
+#include <aws/common/json.h>
#include <aws/common/logging.h>
#include <aws/common/math.h>
#include <aws/common/private/dlloads.h>
+#include <aws/common/private/thread_shared.h>
#include <stdarg.h>
#include <stdlib.h>
@@ -28,6 +30,11 @@
#endif
long (*g_set_mempolicy_ptr)(int, const unsigned long *, unsigned long) = NULL;
+int (*g_numa_available_ptr)(void) = NULL;
+int (*g_numa_num_configured_nodes_ptr)(void) = NULL;
+int (*g_numa_num_possible_cpus_ptr)(void) = NULL;
+int (*g_numa_node_of_cpu_ptr)(int cpu) = NULL;
+
void *g_libnuma_handle = NULL;
void aws_secure_zero(void *pBuf, size_t bufsize) {
@@ -77,6 +84,9 @@ static struct aws_error_info errors[] = {
AWS_ERROR_OOM,
"Out of memory."),
AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_NO_SPACE,
+ "Out of space on disk."),
+ AWS_DEFINE_ERROR_INFO_COMMON(
AWS_ERROR_UNKNOWN,
"Unknown error."),
AWS_DEFINE_ERROR_INFO_COMMON(
@@ -226,6 +236,20 @@ static struct aws_error_info errors[] = {
AWS_DEFINE_ERROR_INFO_COMMON(
AWS_ERROR_DIVIDE_BY_ZERO,
"Attempt to divide a number by zero."),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_INVALID_FILE_HANDLE,
+ "Invalid file handle"),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_OPERATION_INTERUPTED,
+ "The operation was interrupted."
+ ),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_DIRECTORY_NOT_EMPTY,
+ "An operation on a directory was attempted which is not allowed when the directory is not empty."
+ ),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_PLATFORM_NOT_SUPPORTED,
+ "Feature not supported on this platform"),
};
/* clang-format on */
@@ -244,8 +268,11 @@ static struct aws_log_subject_info s_common_log_subject_infos[] = {
"task-scheduler",
"Subject for task scheduler or task specific logging."),
DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_THREAD, "thread", "Subject for logging thread related functions."),
- DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_XML_PARSER, "xml-parser", "Subject for xml parser specific logging."),
DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_MEMTRACE, "memtrace", "Output from the aws_mem_trace_dump function"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_XML_PARSER, "xml-parser", "Subject for xml parser specific logging."),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_IO, "common-io", "Common IO utilities"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_BUS, "bus", "Message bus"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_TEST, "test", "Unit/integration testing"),
};
static struct aws_log_subject_info_list s_common_log_subject_list = {
@@ -262,11 +289,26 @@ void aws_common_library_init(struct aws_allocator *allocator) {
s_common_library_initialized = true;
aws_register_error_info(&s_list);
aws_register_log_subject_info_list(&s_common_log_subject_list);
+ aws_thread_initialize_thread_management();
+ aws_json_module_init(allocator);
/* NUMA is funky and we can't rely on libnuma.so being available. We also don't want to take a hard dependency on it,
* try and load it if we can. */
#if !defined(_WIN32) && !defined(WIN32)
- g_libnuma_handle = dlopen("libnuma.so", RTLD_NOW);
+ /* libnuma defines set_mempolicy() as a WEAK symbol. Loading it into the global symbol table can
+ override symbols that libraries such as libpthread.so define on some unix distros, due to the way
+ loaders and dlopen are often implemented. Sorry about the memory usage here, but it's our only
+ safe choice. Also, please don't do NUMA configurations if memory is your economic bottleneck. */
+ g_libnuma_handle = dlopen("libnuma.so", RTLD_LOCAL);
+
+ /* turns out .so versioning is really inconsistent these days */
+ if (!g_libnuma_handle) {
+ g_libnuma_handle = dlopen("libnuma.so.1", RTLD_LOCAL);
+ }
+
+ if (!g_libnuma_handle) {
+ g_libnuma_handle = dlopen("libnuma.so.2", RTLD_LOCAL);
+ }
if (g_libnuma_handle) {
AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: libnuma.so loaded");
@@ -276,6 +318,35 @@ void aws_common_library_init(struct aws_allocator *allocator) {
} else {
AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: set_mempolicy() failed to load");
}
+
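+ /* dlsym() returns a void *, which ISO C cannot portably convert to a function
+ pointer; assigning through a void ** sidesteps pedantic-compiler warnings */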
+ *(void **)(&g_numa_available_ptr) = dlsym(g_libnuma_handle, "numa_available");
+ if (g_numa_available_ptr) {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_available() loaded");
+ } else {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_available() failed to load");
+ }
+
+ *(void **)(&g_numa_num_configured_nodes_ptr) = dlsym(g_libnuma_handle, "numa_num_configured_nodes");
+ if (g_numa_num_configured_nodes_ptr) {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_num_configured_nodes() loaded");
+ } else {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_num_configured_nodes() failed to load");
+ }
+
+ *(void **)(&g_numa_num_possible_cpus_ptr) = dlsym(g_libnuma_handle, "numa_num_possible_cpus");
+ if (g_numa_num_possible_cpus_ptr) {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_num_possible_cpus() loaded");
+ } else {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_num_possible_cpus() failed to load");
+ }
+
+ *(void **)(&g_numa_node_of_cpu_ptr) = dlsym(g_libnuma_handle, "numa_node_of_cpu");
+ if (g_numa_node_of_cpu_ptr) {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_node_of_cpu() loaded");
+ } else {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_node_of_cpu() failed to load");
+ }
+
} else {
AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: libnuma.so failed to load");
}
@@ -286,8 +357,10 @@ void aws_common_library_init(struct aws_allocator *allocator) {
void aws_common_library_clean_up(void) {
if (s_common_library_initialized) {
s_common_library_initialized = false;
+ aws_thread_join_all_managed();
aws_unregister_error_info(&s_list);
aws_unregister_log_subject_info_list(&s_common_log_subject_list);
+ aws_json_module_cleanup();
#if !defined(_WIN32) && !defined(WIN32)
if (g_libnuma_handle) {
dlclose(g_libnuma_handle);
diff --git a/contrib/restricted/aws/aws-c-common/source/date_time.c b/contrib/restricted/aws/aws-c-common/source/date_time.c
index 8d08e57ad8..77ec6ae0c1 100644
--- a/contrib/restricted/aws/aws-c-common/source/date_time.c
+++ b/contrib/restricted/aws/aws-c-common/source/date_time.c
@@ -12,6 +12,7 @@
#include <aws/common/time.h>
#include <ctype.h>
+#include <math.h>
static const char *RFC822_DATE_FORMAT_STR_MINUS_Z = "%a, %d %b %Y %H:%M:%S GMT";
static const char *RFC822_DATE_FORMAT_STR_WITH_Z = "%a, %d %b %Y %H:%M:%S %Z";
@@ -22,8 +23,8 @@ static const char *ISO_8601_LONG_BASIC_DATE_FORMAT_STR = "%Y%m%dT%H%M%SZ";
static const char *ISO_8601_SHORT_BASIC_DATE_FORMAT_STR = "%Y%m%d";
#define STR_TRIPLET_TO_INDEX(str) \
- (((uint32_t)(uint8_t)tolower((str)[0]) << 0) | ((uint32_t)(uint8_t)tolower((str)[1]) << 8) | \
- ((uint32_t)(uint8_t)tolower((str)[2]) << 16))
+ (((uint32_t)tolower((uint8_t)((str)[0])) << 0) | ((uint32_t)tolower((uint8_t)((str)[1])) << 8) | \
+ ((uint32_t)tolower((uint8_t)((str)[2])) << 16))
static uint32_t s_jan = 0;
static uint32_t s_feb = 0;
@@ -140,7 +141,7 @@ static bool is_utc_time_zone(const char *str) {
}
if (len == 2) {
- return tolower(str[0]) == 'u' && tolower(str[1]) == 't';
+ return tolower((uint8_t)str[0]) == 'u' && tolower((uint8_t)str[1]) == 't';
}
if (len < 3) {
@@ -170,21 +171,25 @@ struct tm s_get_time_struct(struct aws_date_time *dt, bool local_time) {
}
void aws_date_time_init_now(struct aws_date_time *dt) {
- uint64_t current_time = 0;
- aws_sys_clock_get_ticks(&current_time);
- dt->timestamp = (time_t)aws_timestamp_convert(current_time, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL);
- dt->gmt_time = s_get_time_struct(dt, false);
- dt->local_time = s_get_time_struct(dt, true);
+ uint64_t current_time_ns = 0;
+ aws_sys_clock_get_ticks(&current_time_ns);
+ aws_date_time_init_epoch_millis(
+ dt, aws_timestamp_convert(current_time_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL));
}
void aws_date_time_init_epoch_millis(struct aws_date_time *dt, uint64_t ms_since_epoch) {
- dt->timestamp = (time_t)(ms_since_epoch / AWS_TIMESTAMP_MILLIS);
+ uint64_t milliseconds = 0;
+ dt->timestamp =
+ (time_t)aws_timestamp_convert(ms_since_epoch, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_SECS, &milliseconds);
+ dt->milliseconds = (uint16_t)milliseconds;
dt->gmt_time = s_get_time_struct(dt, false);
dt->local_time = s_get_time_struct(dt, true);
}
void aws_date_time_init_epoch_secs(struct aws_date_time *dt, double sec_ms) {
- dt->timestamp = (time_t)sec_ms;
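+ /* split the fractional epoch into whole seconds plus milliseconds,
+ e.g. sec_ms = 1.5 yields timestamp = 1 and milliseconds = 500 */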
+ double integral = 0;
+ dt->milliseconds = (uint16_t)(round(modf(sec_ms, &integral) * AWS_TIMESTAMP_MILLIS));
+ dt->timestamp = (time_t)integral;
dt->gmt_time = s_get_time_struct(dt, false);
dt->local_time = s_get_time_struct(dt, true);
}
@@ -629,6 +634,7 @@ int aws_date_time_init_from_str_cursor(
* timestamp. */
dt->timestamp -= seconds_offset;
+ dt->milliseconds = 0U;
dt->gmt_time = s_get_time_struct(dt, false);
dt->local_time = s_get_time_struct(dt, true);
@@ -743,15 +749,17 @@ int aws_date_time_to_utc_time_short_str(
}
double aws_date_time_as_epoch_secs(const struct aws_date_time *dt) {
- return (double)dt->timestamp;
+ return (double)dt->timestamp + (double)(dt->milliseconds / 1000.0);
}
uint64_t aws_date_time_as_nanos(const struct aws_date_time *dt) {
- return (uint64_t)dt->timestamp * AWS_TIMESTAMP_NANOS;
+ return aws_timestamp_convert((uint64_t)dt->timestamp, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL) +
+ aws_timestamp_convert((uint64_t)dt->milliseconds, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
}
uint64_t aws_date_time_as_millis(const struct aws_date_time *dt) {
- return (uint64_t)dt->timestamp * AWS_TIMESTAMP_MILLIS;
+ return aws_timestamp_convert((uint64_t)dt->timestamp, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL) +
+ (uint64_t)dt->milliseconds;
}
uint16_t aws_date_time_year(const struct aws_date_time *dt, bool local_time) {
diff --git a/contrib/restricted/aws/aws-c-common/source/error.c b/contrib/restricted/aws/aws-c-common/source/error.c
index 60e6c9e799..bdd4dfcd67 100644
--- a/contrib/restricted/aws/aws-c-common/source/error.c
+++ b/contrib/restricted/aws/aws-c-common/source/error.c
@@ -198,6 +198,8 @@ int aws_translate_and_raise_io_error(int error_no) {
return aws_raise_error(AWS_ERROR_MAX_FDS_EXCEEDED);
case ENOMEM:
return aws_raise_error(AWS_ERROR_OOM);
+ case ENOSPC:
+ return aws_raise_error(AWS_ERROR_NO_SPACE);
default:
return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
}
diff --git a/contrib/restricted/aws/aws-c-common/source/external/cJSON.c b/contrib/restricted/aws/aws-c-common/source/external/cJSON.c
new file mode 100644
index 0000000000..8dd79bf1ec
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/external/cJSON.c
@@ -0,0 +1,3113 @@
+/*
+Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+/* cJSON */
+/* JSON parser in C. */
+
+/* disable warnings about old C89 functions in MSVC */
+#if !defined(_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER)
+#define _CRT_SECURE_NO_DEPRECATE
+#endif
+
+#ifdef __GNUC__
+#pragma GCC visibility push(default)
+#endif
+#if defined(_MSC_VER)
+#pragma warning (push)
+/* disable warning about single line comments in system headers */
+#pragma warning (disable : 4001)
+#endif
+
+#include <string.h>
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <ctype.h>
+#include <float.h>
+
+#ifdef ENABLE_LOCALES
+#include <locale.h>
+#endif
+
+#if defined(_MSC_VER)
+#pragma warning (pop)
+#endif
+#ifdef __GNUC__
+#pragma GCC visibility pop
+#endif
+
+#include <aws/common/external/cJSON.h>
+
+/* define our own boolean type */
+#ifdef true
+#undef true
+#endif
+#define true ((cJSON_bool)1)
+
+#ifdef false
+#undef false
+#endif
+#define false ((cJSON_bool)0)
+
+/* define isnan and isinf for ANSI C; in C99 and above they are already defined in math.h */
+#ifndef isinf
+#define isinf(d) (isnan((d - d)) && !isnan(d))
+#endif
+#ifndef isnan
+#define isnan(d) (d != d)
+#endif
+
+#ifndef NAN
+#ifdef _WIN32
+#define NAN sqrt(-1.0)
+#else
+#define NAN 0.0/0.0
+#endif
+#endif
+
+typedef struct {
+ const unsigned char *json;
+ size_t position;
+} error;
+static error global_error = { NULL, 0 };
+
+CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void)
+{
+ return (const char*) (global_error.json + global_error.position);
+}
+
+CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item)
+{
+ if (!cJSON_IsString(item))
+ {
+ return NULL;
+ }
+
+ return item->valuestring;
+}
+
+CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item)
+{
+ if (!cJSON_IsNumber(item))
+ {
+ return (double) NAN;
+ }
+
+ return item->valuedouble;
+}
+
+/* This is a safeguard to prevent copy-pasters from using incompatible C and header files */
+#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || (CJSON_VERSION_PATCH != 15)
+ #error cJSON.h and cJSON.c have different versions. Make sure that both have the same.
+#endif
+
+CJSON_PUBLIC(const char*) cJSON_Version(void)
+{
+ static char version[15];
+ snprintf(version, sizeof(version) / sizeof(char), "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH);
+
+ return version;
+}
+
+/* Case insensitive string comparison, doesn't consider two NULL pointers equal though */
+static int case_insensitive_strcmp(const unsigned char *string1, const unsigned char *string2)
+{
+ if ((string1 == NULL) || (string2 == NULL))
+ {
+ return 1;
+ }
+
+ if (string1 == string2)
+ {
+ return 0;
+ }
+
+ for(; tolower(*string1) == tolower(*string2); (void)string1++, string2++)
+ {
+ if (*string1 == '\0')
+ {
+ return 0;
+ }
+ }
+
+ return tolower(*string1) - tolower(*string2);
+}
+
+typedef struct internal_hooks
+{
+ void *(CJSON_CDECL *allocate)(size_t size);
+ void (CJSON_CDECL *deallocate)(void *pointer);
+ void *(CJSON_CDECL *reallocate)(void *pointer, size_t size);
+} internal_hooks;
+
+#if defined(_MSC_VER)
+/* work around MSVC error C2322: '...' address of dllimport '...' is not static */
+static void * CJSON_CDECL internal_malloc(size_t size)
+{
+ return malloc(size);
+}
+static void CJSON_CDECL internal_free(void *pointer)
+{
+ free(pointer);
+}
+static void * CJSON_CDECL internal_realloc(void *pointer, size_t size)
+{
+ return realloc(pointer, size);
+}
+#else
+#define internal_malloc malloc
+#define internal_free free
+#define internal_realloc realloc
+#endif
+
+/* strlen of character literals resolved at compile time */
+#define static_strlen(string_literal) (sizeof(string_literal) - sizeof(""))
+
+static internal_hooks global_hooks = { internal_malloc, internal_free, internal_realloc }; // NOLINT
+
+static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks) // NOLINT
+{
+ size_t length = 0;
+ unsigned char *copy = NULL;
+
+ if (string == NULL)
+ {
+ return NULL;
+ }
+
+ length = strlen((const char*)string) + sizeof("");
+ copy = (unsigned char*)hooks->allocate(length);
+ if (copy == NULL)
+ {
+ return NULL;
+ }
+ memcpy(copy, string, length);
+
+ return copy;
+}
+
+CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks) // NOLINT
+{
+ if (hooks == NULL)
+ {
+ /* Reset hooks */
+ global_hooks.allocate = malloc;
+ global_hooks.deallocate = free;
+ global_hooks.reallocate = realloc;
+ return;
+ }
+
+ global_hooks.allocate = malloc;
+ if (hooks->malloc_fn != NULL)
+ {
+ global_hooks.allocate = hooks->malloc_fn;
+ }
+
+ global_hooks.deallocate = free;
+ if (hooks->free_fn != NULL)
+ {
+ global_hooks.deallocate = hooks->free_fn;
+ }
+
+ /* use realloc only if both free and malloc are used */
+ global_hooks.reallocate = NULL;
+ if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free))
+ {
+ global_hooks.reallocate = realloc;
+ }
+}
+
+/* Internal constructor. */
+static cJSON *cJSON_New_Item(const internal_hooks * const hooks)
+{
+ cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON));
+ if (node)
+ {
+ memset(node, '\0', sizeof(cJSON));
+ }
+
+ return node;
+}
+
+/* Delete a cJSON structure. */
+CJSON_PUBLIC(void) cJSON_Delete(cJSON *item)
+{
+ cJSON *next = NULL;
+ while (item != NULL)
+ {
+ next = item->next;
+ if (!(item->type & cJSON_IsReference) && (item->child != NULL))
+ {
+ cJSON_Delete(item->child);
+ }
+ if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL))
+ {
+ global_hooks.deallocate(item->valuestring);
+ }
+ if (!(item->type & cJSON_StringIsConst) && (item->string != NULL))
+ {
+ global_hooks.deallocate(item->string);
+ }
+ global_hooks.deallocate(item);
+ item = next;
+ }
+}
+
+/* get the decimal point character of the current locale */
+static unsigned char get_decimal_point(void)
+{
+#ifdef ENABLE_LOCALES
+ struct lconv *lconv = localeconv();
+ return (unsigned char) lconv->decimal_point[0];
+#else
+ return '.';
+#endif
+}
+
+typedef struct
+{
+ const unsigned char *content;
+ size_t length;
+ size_t offset;
+ size_t depth; /* How deeply nested (in arrays/objects) is the input at the current offset. */
+ internal_hooks hooks;
+} parse_buffer;
+
+/* check if the given size is left to read in a given parse buffer (starting with 1) */
+#define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) // NOLINT
+/* check if the buffer can be accessed at the given index (starting with 0) */
+#define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) // NOLINT
+#define cannot_access_at_index(buffer, index) (!can_access_at_index(buffer, index))
+/* get a pointer to the buffer at the position */
+#define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset)
+
+/* Parse the input text to generate a number, and populate the result into item. */
+static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer) // NOLINT
+{
+ double number = 0;
+ unsigned char *after_end = NULL;
+ unsigned char number_c_string[64];
+ unsigned char decimal_point = get_decimal_point(); // NOLINT
+ size_t i = 0;
+
+ if ((input_buffer == NULL) || (input_buffer->content == NULL))
+ {
+ return false; // NOLINT
+ }
+
+ /* copy the number into a temporary buffer and replace '.' with the decimal point
+ * of the current locale (for strtod)
+ * This also takes care of '\0' not necessarily being available for marking the end of the input */
+ for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++)
+ {
+ switch (buffer_at_offset(input_buffer)[i])
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ case '+':
+ case '-':
+ case 'e':
+ case 'E':
+ number_c_string[i] = buffer_at_offset(input_buffer)[i];
+ break;
+
+ case '.':
+ number_c_string[i] = decimal_point;
+ break;
+
+ default:
+ goto loop_end;
+ }
+ }
+loop_end:
+ number_c_string[i] = '\0';
+
+ number = strtod((const char*)number_c_string, (char**)&after_end);
+ if (number_c_string == after_end)
+ {
+ return false; /* parse_error */ // NOLINT
+ }
+
+ item->valuedouble = number;
+
+ /* use saturation in case of overflow */
+ if (number >= INT_MAX)
+ { // NOLINT
+ item->valueint = INT_MAX;
+ }
+ else if (number <= (double)INT_MIN)
+ {
+ item->valueint = INT_MIN;
+ }
+ else
+ {
+ item->valueint = (int)number;
+ }
+
+ item->type = cJSON_Number; // NOLINT
+
+ input_buffer->offset += (size_t)(after_end - number_c_string);
+ return true; // NOLINT
+}
+
+/* don't ask me, but the original cJSON_SetNumberValue returns an integer or double */
+CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) // NOLINT
+{
+ if (number >= INT_MAX)
+ {
+ object->valueint = INT_MAX;
+ }
+ else if (number <= (double)INT_MIN)
+ {
+ object->valueint = INT_MIN;
+ }
+ else
+ {
+ object->valueint = (int)number;
+ }
+
+ return object->valuedouble = number;
+}
+
+CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring)
+{
+ char *copy = NULL;
+ /* if object's type is not cJSON_String or is cJSON_IsReference, it should not set valuestring */
+ if (!(object->type & cJSON_String) || (object->type & cJSON_IsReference))
+ {
+ return NULL;
+ }
+ if (strlen(valuestring) <= strlen(object->valuestring))
+ {
+ size_t value_length = strlen(valuestring) + sizeof("");
+ memcpy(object->valuestring, valuestring, value_length);
+ return object->valuestring;
+ }
+ copy = (char*) cJSON_strdup((const unsigned char*)valuestring, &global_hooks);
+ if (copy == NULL)
+ {
+ return NULL;
+ }
+ if (object->valuestring != NULL)
+ {
+ cJSON_free(object->valuestring);
+ }
+ object->valuestring = copy;
+
+ return copy;
+}
+
+typedef struct
+{
+ unsigned char *buffer;
+ size_t length;
+ size_t offset;
+ size_t depth; /* current nesting depth (for formatted printing) */
+ cJSON_bool noalloc;
+ cJSON_bool format; /* is this print a formatted print */
+ internal_hooks hooks;
+} printbuffer;
+
+/* realloc printbuffer if necessary to have at least "needed" bytes more */
+static unsigned char* ensure(printbuffer * const p, size_t needed) // NOLINT
+{
+ unsigned char *newbuffer = NULL;
+ size_t newsize = 0;
+
+ if ((p == NULL) || (p->buffer == NULL))
+ {
+ return NULL;
+ }
+
+ if ((p->length > 0) && (p->offset >= p->length))
+ {
+ /* make sure that offset is valid */
+ return NULL;
+ }
+
+ if (needed > INT_MAX)
+ {
+ /* sizes bigger than INT_MAX are currently not supported */
+ return NULL;
+ }
+
+ needed += p->offset + 1;
+ if (needed <= p->length)
+ {
+ return p->buffer + p->offset;
+ }
+
+ if (p->noalloc) {
+ return NULL;
+ }
+
+ /* calculate new buffer size */
+ if (needed > (INT_MAX / 2))
+ {
+ /* overflow of int, use INT_MAX if possible */
+ if (needed <= INT_MAX)
+ {
+ newsize = INT_MAX;
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+ else
+ {
+ newsize = needed * 2;
+ }
+
+ if (p->hooks.reallocate != NULL)
+ {
+ /* reallocate with realloc if available */
+ newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize);
+ if (newbuffer == NULL)
+ {
+ p->hooks.deallocate(p->buffer);
+ p->length = 0;
+ p->buffer = NULL;
+
+ return NULL;
+ }
+ }
+ else
+ {
+ /* otherwise reallocate manually */
+ newbuffer = (unsigned char*)p->hooks.allocate(newsize);
+ if (!newbuffer)
+ {
+ p->hooks.deallocate(p->buffer);
+ p->length = 0;
+ p->buffer = NULL;
+
+ return NULL;
+ }
+
+ memcpy(newbuffer, p->buffer, p->offset + 1);
+ p->hooks.deallocate(p->buffer);
+ }
+ p->length = newsize;
+ p->buffer = newbuffer;
+
+ return newbuffer + p->offset;
+}
+
+/* calculate the new length of the string in a printbuffer and update the offset */
+static void update_offset(printbuffer * const buffer) // NOLINT
+{
+ const unsigned char *buffer_pointer = NULL;
+ if ((buffer == NULL) || (buffer->buffer == NULL))
+ {
+ return;
+ }
+ buffer_pointer = buffer->buffer + buffer->offset;
+
+ buffer->offset += strlen((const char*)buffer_pointer);
+}
+
+/* secure comparison of floating-point variables */
+static cJSON_bool compare_double(double a, double b) // NOLINT
+{
+ double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b);
+ return (fabs(a - b) <= maxVal * DBL_EPSILON);
+}
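+/* Illustrative note: the tolerance scales with the larger magnitude, so
+ * compare_double(1.0, 1.0 + DBL_EPSILON) holds while
+ * compare_double(1.0, 1.000001) does not. */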
+
+/* Render the number nicely from the given item into a string. */
+static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer) // NOLINT
+{
+ unsigned char *output_pointer = NULL;
+ double d = item->valuedouble;
+ int length = 0;
+ size_t i = 0;
+ unsigned char number_buffer[26] = {0}; /* temporary buffer to print the number into */
+ unsigned char decimal_point = get_decimal_point(); // NOLINT
+ double test = 0.0;
+
+ if (output_buffer == NULL)
+ {
+ return false;
+ }
+
+ /* This checks for NaN and Infinity */
+ if (isnan(d) || isinf(d))
+ {
+ length = snprintf((char*)number_buffer, sizeof(number_buffer) / sizeof(char), "null");
+ }
+ else
+ {
+ /* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */
+ length = snprintf((char*)number_buffer, sizeof(number_buffer) / sizeof(char), "%1.15g", d);
+
+ /* Check whether the original double can be recovered */
+ if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || !compare_double((double)test, d))
+ {
+ /* If not, print with 17 decimal places of precision */
+ length = snprintf((char*)number_buffer, sizeof(number_buffer) / sizeof(char), "%1.17g", d);
+ }
+ }
+
+ /* sprintf failed or buffer overrun occurred */
+ if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1)))
+ {
+ return false;
+ }
+
+ /* reserve appropriate space in the output */
+ output_pointer = ensure(output_buffer, (size_t)length + sizeof(""));
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+
+ /* copy the printed number to the output and replace locale
+ * dependent decimal point with '.' */
+ for (i = 0; i < ((size_t)length); i++)
+ {
+ if (number_buffer[i] == decimal_point)
+ {
+ output_pointer[i] = '.';
+ continue;
+ }
+
+ output_pointer[i] = number_buffer[i];
+ }
+ output_pointer[i] = '\0';
+
+ output_buffer->offset += (size_t)length;
+
+ return true;
+}
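+/* Illustrative note: most doubles survive the 15-digit round-trip (0.1 prints as
+ * "0.1"), but a value like 0.1 + 0.2 only round-trips at 17 digits and prints as
+ * "0.30000000000000004". */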
+
+/* parse 4 digit hexadecimal number */
+static unsigned parse_hex4(const unsigned char * const input)
+{
+ unsigned int h = 0;
+ size_t i = 0;
+
+ for (i = 0; i < 4; i++)
+ {
+ /* parse digit */
+ if ((input[i] >= '0') && (input[i] <= '9'))
+ {
+ h += (unsigned int) input[i] - '0';
+ }
+ else if ((input[i] >= 'A') && (input[i] <= 'F'))
+ {
+ h += (unsigned int) 10 + input[i] - 'A';
+ }
+ else if ((input[i] >= 'a') && (input[i] <= 'f'))
+ {
+ h += (unsigned int) 10 + input[i] - 'a';
+ }
+ else /* invalid */
+ {
+ return 0;
+ }
+
+ if (i < 3)
+ {
+ /* shift left to make place for the next nibble */
+ h = h << 4;
+ }
+ }
+
+ return h;
+}
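+/* Illustrative note: parse_hex4 on "00e9" yields 0x00E9; any non-hex character
+ * makes the whole sequence parse as 0. */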
+
+/* converts a UTF-16 literal to UTF-8
+* A literal can be one or two sequences of the form \uXXXX */
+static unsigned char utf16_literal_to_utf8(const unsigned char * const input_pointer, const unsigned char * const input_end, unsigned char **output_pointer)
+{
+ long unsigned int codepoint = 0;
+ unsigned int first_code = 0;
+ const unsigned char *first_sequence = input_pointer;
+ unsigned char utf8_length = 0;
+ unsigned char utf8_position = 0;
+ unsigned char sequence_length = 0;
+ unsigned char first_byte_mark = 0;
+
+ if ((input_end - first_sequence) < 6)
+ {
+ /* input ends unexpectedly */
+ goto fail;
+ }
+
+ /* get the first utf16 sequence */
+ first_code = parse_hex4(first_sequence + 2);
+
+ /* check that the code is valid */
+ if (((first_code >= 0xDC00) && (first_code <= 0xDFFF)))
+ {
+ goto fail;
+ }
+
+ /* UTF16 surrogate pair */
+ if ((first_code >= 0xD800) && (first_code <= 0xDBFF))
+ {
+ const unsigned char *second_sequence = first_sequence + 6;
+ unsigned int second_code = 0;
+ sequence_length = 12; /* \uXXXX\uXXXX */
+
+ if ((input_end - second_sequence) < 6)
+ {
+ /* input ends unexpectedly */
+ goto fail;
+ }
+
+ if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u'))
+ {
+ /* missing second half of the surrogate pair */
+ goto fail;
+ }
+
+ /* get the second utf16 sequence */
+ second_code = parse_hex4(second_sequence + 2);
+ /* check that the code is valid */
+ if ((second_code < 0xDC00) || (second_code > 0xDFFF))
+ {
+ /* invalid second half of the surrogate pair */
+ goto fail;
+ }
+
+
+ /* calculate the unicode codepoint from the surrogate pair */
+ codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF));
+ }
+ else
+ {
+ sequence_length = 6; /* \uXXXX */
+ codepoint = first_code;
+ }
+
+ /* encode as UTF-8
+ * takes at maximum 4 bytes to encode:
+ * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
+ if (codepoint < 0x80)
+ {
+ /* normal ascii, encoding 0xxxxxxx */
+ utf8_length = 1;
+ }
+ else if (codepoint < 0x800)
+ {
+ /* two bytes, encoding 110xxxxx 10xxxxxx */
+ utf8_length = 2;
+ first_byte_mark = 0xC0; /* 11000000 */
+ }
+ else if (codepoint < 0x10000)
+ {
+ /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */
+ utf8_length = 3;
+ first_byte_mark = 0xE0; /* 11100000 */
+ }
+ else if (codepoint <= 0x10FFFF)
+ {
+ /* four bytes, encoding 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
+ utf8_length = 4;
+ first_byte_mark = 0xF0; /* 11110000 */
+ }
+ else
+ {
+ /* invalid unicode codepoint */
+ goto fail;
+ }
+
+ /* encode as utf8 */
+ for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--)
+ {
+ /* 10xxxxxx */
+ (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF);
+ codepoint >>= 6;
+ }
+ /* encode first byte */
+ if (utf8_length > 1)
+ {
+ (*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF);
+ }
+ else
+ {
+ (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F);
+ }
+
+ *output_pointer += utf8_length;
+
+ return sequence_length;
+
+fail:
+ return 0;
+}
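+/* Worked example: the surrogate pair "\uD83D\uDE00" gives
+ * codepoint 0x10000 + ((0x3D << 10) | 0x200) = 0x1F600, emitted as the four
+ * UTF-8 bytes F0 9F 98 80. */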
+
+/* Parse the input text into an unescaped C string, and populate item. */
+static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer) // NOLINT
+{
+ const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1;
+ const unsigned char *input_end = buffer_at_offset(input_buffer) + 1;
+ unsigned char *output_pointer = NULL;
+ unsigned char *output = NULL;
+
+ /* not a string */
+ if (buffer_at_offset(input_buffer)[0] != '\"')
+ {
+ goto fail;
+ }
+
+ {
+ /* calculate approximate size of the output (overestimate) */
+ size_t allocation_length = 0;
+ size_t skipped_bytes = 0;
+ while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"'))
+ {
+ /* is escape sequence */
+ if (input_end[0] == '\\')
+ {
+ if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length)
+ {
+ /* prevent buffer overflow when last input character is a backslash */
+ goto fail;
+ }
+ skipped_bytes++;
+ input_end++;
+ }
+ input_end++;
+ }
+ if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"'))
+ {
+ goto fail; /* string ended unexpectedly */
+ }
+
+ /* This is at most how much we need for the output */
+ allocation_length = (size_t) (input_end - buffer_at_offset(input_buffer)) - skipped_bytes;
+ output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof(""));
+ if (output == NULL)
+ {
+ goto fail; /* allocation failure */
+ }
+ }
+
+ output_pointer = output;
+ /* loop through the string literal */
+ while (input_pointer < input_end)
+ {
+ if (*input_pointer != '\\')
+ {
+ *output_pointer++ = *input_pointer++;
+ }
+ /* escape sequence */
+ else
+ {
+ unsigned char sequence_length = 2;
+ if ((input_end - input_pointer) < 1)
+ {
+ goto fail;
+ }
+
+ switch (input_pointer[1])
+ {
+ case 'b':
+ *output_pointer++ = '\b';
+ break;
+ case 'f':
+ *output_pointer++ = '\f';
+ break;
+ case 'n':
+ *output_pointer++ = '\n';
+ break;
+ case 'r':
+ *output_pointer++ = '\r';
+ break;
+ case 't':
+ *output_pointer++ = '\t';
+ break;
+ case '\"':
+ case '\\':
+ case '/':
+ *output_pointer++ = input_pointer[1];
+ break;
+
+ /* UTF-16 literal */
+ case 'u':
+ sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer);
+ if (sequence_length == 0)
+ {
+ /* failed to convert UTF16-literal to UTF-8 */
+ goto fail;
+ }
+ break;
+
+ default:
+ goto fail;
+ }
+ input_pointer += sequence_length;
+ }
+ }
+
+ /* zero terminate the output */
+ *output_pointer = '\0';
+
+ item->type = cJSON_String;
+ item->valuestring = (char*)output;
+
+ input_buffer->offset = (size_t) (input_end - input_buffer->content);
+ input_buffer->offset++;
+
+ return true;
+
+fail:
+ if (output != NULL)
+ {
+ input_buffer->hooks.deallocate(output);
+ }
+
+ if (input_pointer != NULL)
+ {
+ input_buffer->offset = (size_t)(input_pointer - input_buffer->content);
+ }
+
+ return false;
+}
+
+/* Render the cstring provided to an escaped version that can be printed. */
+static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer) // NOLINT
+{
+ const unsigned char *input_pointer = NULL;
+ unsigned char *output = NULL;
+ unsigned char *output_pointer = NULL;
+ size_t output_length = 0;
+ /* numbers of additional characters needed for escaping */
+ size_t escape_characters = 0;
+
+ if (output_buffer == NULL)
+ {
+ return false;
+ }
+
+ /* empty string */
+ if (input == NULL)
+ {
+ output = ensure(output_buffer, sizeof("\"\""));
+ if (output == NULL)
+ {
+ return false;
+ }
+ memcpy(output, "\"\"", 3); /* NOLINT */
+
+ return true;
+ }
+
+ /* set "flag" to 1 if something needs to be escaped */
+ for (input_pointer = input; *input_pointer; input_pointer++)
+ {
+ switch (*input_pointer)
+ {
+ case '\"':
+ case '\\':
+ case '\b':
+ case '\f':
+ case '\n':
+ case '\r':
+ case '\t':
+ /* one character escape sequence */
+ escape_characters++;
+ break;
+ default:
+ if (*input_pointer < 32)
+ {
+ /* UTF-16 escape sequence uXXXX */
+ escape_characters += 5;
+ }
+ break;
+ }
+ }
+ output_length = (size_t)(input_pointer - input) + escape_characters;
+
+ output = ensure(output_buffer, output_length + sizeof("\"\""));
+ if (output == NULL)
+ {
+ return false;
+ }
+
+ /* no characters have to be escaped */
+ if (escape_characters == 0)
+ {
+ output[0] = '\"';
+ memcpy(output + 1, input, output_length);
+ output[output_length + 1] = '\"';
+ output[output_length + 2] = '\0';
+
+ return true;
+ }
+
+ output[0] = '\"';
+ output_pointer = output + 1;
+ /* copy the string */
+ for (input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++)
+ {
+ if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\'))
+ {
+ /* normal character, copy */
+ *output_pointer = *input_pointer;
+ }
+ else
+ {
+ /* character needs to be escaped */
+ *output_pointer++ = '\\';
+ switch (*input_pointer)
+ {
+ case '\\':
+ *output_pointer = '\\';
+ break;
+ case '\"':
+ *output_pointer = '\"';
+ break;
+ case '\b':
+ *output_pointer = 'b';
+ break;
+ case '\f':
+ *output_pointer = 'f';
+ break;
+ case '\n':
+ *output_pointer = 'n';
+ break;
+ case '\r':
+ *output_pointer = 'r';
+ break;
+ case '\t':
+ *output_pointer = 't';
+ break;
+ default:
+ /* escape and print as unicode codepoint */
+ snprintf((char*)output_pointer, 6 * sizeof(char), "u%04x", *input_pointer);
+ output_pointer += 4;
+ break;
+ }
+ }
+ }
+ output[output_length + 1] = '\"';
+ output[output_length + 2] = '\0';
+
+ return true;
+}
+
+/* Invoke print_string_ptr (which is useful) on an item. */
+static cJSON_bool print_string(const cJSON * const item, printbuffer * const p)
+{
+ return print_string_ptr((unsigned char*)item->valuestring, p);
+}
+
+/* Predeclare these prototypes. */
+static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer);
+static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer);
+static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer);
+static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer);
+static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer);
+static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer);
+
+/* Utility to jump whitespace and cr/lf */
+static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer) // NOLINT
+{
+ if ((buffer == NULL) || (buffer->content == NULL))
+ {
+ return NULL;
+ }
+
+ if (cannot_access_at_index(buffer, 0))
+ {
+ return buffer;
+ }
+
+ while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32))
+ {
+ buffer->offset++;
+ }
+
+ if (buffer->offset == buffer->length)
+ {
+ buffer->offset--;
+ }
+
+ return buffer;
+}
+
+/* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer */
+static parse_buffer *skip_utf8_bom(parse_buffer * const buffer) // NOLINT
+{
+ if ((buffer == NULL) || (buffer->content == NULL) || (buffer->offset != 0))
+ {
+ return NULL;
+ }
+
+ if (can_access_at_index(buffer, 4) && (strncmp((const char*)buffer_at_offset(buffer), "\xEF\xBB\xBF", 3) == 0))
+ {
+ buffer->offset += 3;
+ }
+
+ return buffer;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated)
+{
+ size_t buffer_length;
+
+ if (NULL == value)
+ {
+ return NULL;
+ }
+
+ /* include room for the null terminator, since require_null_terminated may be set */
+ buffer_length = strlen(value) + sizeof("");
+
+ return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end, require_null_terminated);
+}
+
+/* Parse an object - create a new root, and populate. */
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated)
+{
+ parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } };
+ cJSON *item = NULL;
+
+ /* reset error position */
+ global_error.json = NULL;
+ global_error.position = 0;
+
+ if (value == NULL || 0 == buffer_length)
+ {
+ goto fail;
+ }
+
+ buffer.content = (const unsigned char*)value;
+ buffer.length = buffer_length;
+ buffer.offset = 0;
+ buffer.hooks = global_hooks;
+
+ item = cJSON_New_Item(&global_hooks);
+ if (item == NULL) /* memory fail */
+ {
+ goto fail;
+ }
+
+ if (!parse_value(item, buffer_skip_whitespace(skip_utf8_bom(&buffer))))
+ {
+ /* parse failure. ep is set. */
+ goto fail;
+ }
+
+ /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */
+ if (require_null_terminated)
+ {
+ buffer_skip_whitespace(&buffer);
+ if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0')
+ {
+ goto fail;
+ }
+ }
+ if (return_parse_end)
+ {
+ *return_parse_end = (const char*)buffer_at_offset(&buffer);
+ }
+
+ return item;
+
+fail:
+ if (item != NULL)
+ {
+ cJSON_Delete(item);
+ }
+
+ if (value != NULL)
+ {
+ error local_error;
+ local_error.json = (const unsigned char*)value;
+ local_error.position = 0;
+
+ if (buffer.offset < buffer.length)
+ {
+ local_error.position = buffer.offset;
+ }
+ else if (buffer.length > 0)
+ {
+ local_error.position = buffer.length - 1;
+ }
+
+ if (return_parse_end != NULL)
+ {
+ *return_parse_end = (const char*)local_error.json + local_error.position;
+ }
+
+ global_error = local_error;
+ }
+
+ return NULL;
+}
+
+/* Default options for cJSON_Parse */
+CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value)
+{
+ return cJSON_ParseWithOpts(value, 0, 0);
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length)
+{
+ return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0);
+}
+
+#define cjson_min(a, b) (((a) < (b)) ? (a) : (b))
+
+static unsigned char *print(const cJSON * const item, cJSON_bool format, const internal_hooks * const hooks)
+{
+ static const size_t default_buffer_size = 256;
+ printbuffer buffer[1];
+ unsigned char *printed = NULL;
+
+ memset(buffer, 0, sizeof(buffer));
+
+ /* create buffer */
+ buffer->buffer = (unsigned char*) hooks->allocate(default_buffer_size);
+ buffer->length = default_buffer_size;
+ buffer->format = format;
+ buffer->hooks = *hooks;
+ if (buffer->buffer == NULL)
+ {
+ goto fail;
+ }
+
+ /* print the value */
+ if (!print_value(item, buffer))
+ {
+ goto fail;
+ }
+ update_offset(buffer);
+
+ /* check if reallocate is available */
+ if (hooks->reallocate != NULL)
+ {
+ printed = (unsigned char*) hooks->reallocate(buffer->buffer, buffer->offset + 1);
+ if (printed == NULL) {
+ goto fail;
+ }
+ buffer->buffer = NULL;
+ }
+ else /* otherwise copy the JSON over to a new buffer */
+ {
+ printed = (unsigned char*) hooks->allocate(buffer->offset + 1);
+ if (printed == NULL)
+ {
+ goto fail;
+ }
+ memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1));
+ printed[buffer->offset] = '\0'; /* just to be sure */
+
+ /* free the buffer */
+ hooks->deallocate(buffer->buffer);
+ }
+
+ return printed;
+
+fail:
+ if (buffer->buffer != NULL)
+ {
+ hooks->deallocate(buffer->buffer);
+ }
+
+ if (printed != NULL)
+ {
+ hooks->deallocate(printed);
+ }
+
+ return NULL;
+}
+
+/* Render a cJSON item/entity/structure to text. */
+CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item)
+{
+ return (char*)print(item, true, &global_hooks);
+}
+
+CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item)
+{
+ return (char*)print(item, false, &global_hooks);
+}
+
+CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt)
+{
+ printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } };
+
+ if (prebuffer < 0)
+ {
+ return NULL;
+ }
+
+ p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer);
+ if (!p.buffer)
+ {
+ return NULL;
+ }
+
+ p.length = (size_t)prebuffer;
+ p.offset = 0;
+ p.noalloc = false;
+ p.format = fmt;
+ p.hooks = global_hooks;
+
+ if (!print_value(item, &p))
+ {
+ global_hooks.deallocate(p.buffer);
+ return NULL;
+ }
+
+ return (char*)p.buffer;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format)
+{
+ printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } };
+
+ if ((length < 0) || (buffer == NULL))
+ {
+ return false;
+ }
+
+ p.buffer = (unsigned char*)buffer;
+ p.length = (size_t)length;
+ p.offset = 0;
+ p.noalloc = true;
+ p.format = format;
+ p.hooks = global_hooks;
+
+ return print_value(item, &p);
+}
+
+/* Parser core - when encountering text, process appropriately. */
+static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer)
+{
+ if ((input_buffer == NULL) || (input_buffer->content == NULL))
+ {
+ return false; /* no input */
+ }
+
+ /* parse the different types of values */
+ /* null */
+ if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0))
+ {
+ item->type = cJSON_NULL;
+ input_buffer->offset += 4;
+ return true;
+ }
+ /* false */
+ if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0))
+ {
+ item->type = cJSON_False;
+ input_buffer->offset += 5;
+ return true;
+ }
+ /* true */
+ if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0))
+ {
+ item->type = cJSON_True;
+ item->valueint = 1;
+ input_buffer->offset += 4;
+ return true;
+ }
+ /* string */
+ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"'))
+ {
+ return parse_string(item, input_buffer);
+ }
+ /* number */
+ if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') && (buffer_at_offset(input_buffer)[0] <= '9'))))
+ {
+ return parse_number(item, input_buffer);
+ }
+ /* array */
+ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '['))
+ {
+ return parse_array(item, input_buffer);
+ }
+ /* object */
+ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{'))
+ {
+ return parse_object(item, input_buffer);
+ }
+
+ return false;
+}
+
+/* Render a value to text. */
+static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer)
+{
+ unsigned char *output = NULL;
+
+ if ((item == NULL) || (output_buffer == NULL))
+ {
+ return false;
+ }
+
+ switch ((item->type) & 0xFF)
+ {
+ case cJSON_NULL:
+ output = ensure(output_buffer, 5);
+ if (output == NULL)
+ {
+ return false;
+ }
+ memcpy(output, "null", 5); /* NOLINT */
+ return true;
+
+ case cJSON_False:
+ output = ensure(output_buffer, 6);
+ if (output == NULL)
+ {
+ return false;
+ }
+ memcpy(output, "false", 6); /* NOLINT */
+ return true;
+
+ case cJSON_True:
+ output = ensure(output_buffer, 5);
+ if (output == NULL)
+ {
+ return false;
+ }
+ memcpy(output, "true", 5); /* NOLINT */
+ return true;
+
+ case cJSON_Number:
+ return print_number(item, output_buffer);
+
+ case cJSON_Raw:
+ {
+ size_t raw_length = 0;
+ if (item->valuestring == NULL)
+ {
+ return false;
+ }
+
+ raw_length = strlen(item->valuestring) + sizeof("");
+ output = ensure(output_buffer, raw_length);
+ if (output == NULL)
+ {
+ return false;
+ }
+ memcpy(output, item->valuestring, raw_length);
+ return true;
+ }
+
+ case cJSON_String:
+ return print_string(item, output_buffer);
+
+ case cJSON_Array:
+ return print_array(item, output_buffer);
+
+ case cJSON_Object:
+ return print_object(item, output_buffer);
+
+ default:
+ return false;
+ }
+}
+
+/* Build an array from input text. */
+static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer)
+{
+ cJSON *head = NULL; /* head of the linked list */
+ cJSON *current_item = NULL;
+
+ if (input_buffer->depth >= CJSON_NESTING_LIMIT)
+ {
+ return false; /* too deeply nested */
+ }
+ input_buffer->depth++;
+
+ if (buffer_at_offset(input_buffer)[0] != '[')
+ {
+ /* not an array */
+ goto fail;
+ }
+
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']'))
+ {
+ /* empty array */
+ goto success;
+ }
+
+ /* check if we skipped to the end of the buffer */
+ if (cannot_access_at_index(input_buffer, 0))
+ {
+ input_buffer->offset--;
+ goto fail;
+ }
+
+ /* step back to character in front of the first element */
+ input_buffer->offset--;
+ /* loop through the comma separated array elements */
+ do
+ {
+ /* allocate next item */
+ cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
+ if (new_item == NULL)
+ {
+ goto fail; /* allocation failure */
+ }
+
+ /* attach next item to list */
+ if (head == NULL)
+ {
+ /* start the linked list */
+ current_item = head = new_item;
+ }
+ else
+ {
+ /* add to the end and advance */
+ current_item->next = new_item;
+ new_item->prev = current_item;
+ current_item = new_item;
+ }
+
+ /* parse next value */
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (!parse_value(current_item, input_buffer))
+ {
+ goto fail; /* failed to parse value */
+ }
+ buffer_skip_whitespace(input_buffer);
+ }
+ while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ','));
+
+ if (cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']')
+ {
+ goto fail; /* expected end of array */
+ }
+
+success:
+ input_buffer->depth--;
+
+ if (head != NULL) {
+ head->prev = current_item;
+ }
+
+ item->type = cJSON_Array;
+ item->child = head;
+
+ input_buffer->offset++;
+
+ return true;
+
+fail:
+ if (head != NULL)
+ {
+ cJSON_Delete(head);
+ }
+
+ return false;
+}
+
+/* Render an array to text */
+static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer)
+{
+ unsigned char *output_pointer = NULL;
+ size_t length = 0;
+ cJSON *current_element = item->child;
+
+ if (output_buffer == NULL)
+ {
+ return false;
+ }
+
+ /* Compose the output array. */
+ /* opening square bracket */
+ output_pointer = ensure(output_buffer, 1);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+
+ *output_pointer = '[';
+ output_buffer->offset++;
+ output_buffer->depth++;
+
+ while (current_element != NULL)
+ {
+ if (!print_value(current_element, output_buffer))
+ {
+ return false;
+ }
+ update_offset(output_buffer);
+ if (current_element->next)
+ {
+ length = (size_t) (output_buffer->format ? 2 : 1);
+ output_pointer = ensure(output_buffer, length + 1);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+ *output_pointer++ = ',';
+ if(output_buffer->format)
+ {
+ *output_pointer++ = ' ';
+ }
+ *output_pointer = '\0';
+ output_buffer->offset += length;
+ }
+ current_element = current_element->next;
+ }
+
+ output_pointer = ensure(output_buffer, 2);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+ *output_pointer++ = ']';
+ *output_pointer = '\0';
+ output_buffer->depth--;
+
+ return true;
+}
+
+/* Build an object from the text. */
+static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer)
+{
+ cJSON *head = NULL; /* linked list head */
+ cJSON *current_item = NULL;
+
+ if (input_buffer->depth >= CJSON_NESTING_LIMIT)
+ {
+ return false; /* too deeply nested */
+ }
+ input_buffer->depth++;
+
+ if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{'))
+ {
+ goto fail; /* not an object */
+ }
+
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}'))
+ {
+ goto success; /* empty object */
+ }
+
+ /* check if we skipped to the end of the buffer */
+ if (cannot_access_at_index(input_buffer, 0))
+ {
+ input_buffer->offset--;
+ goto fail;
+ }
+
+ /* step back to character in front of the first element */
+ input_buffer->offset--;
+ /* loop through the comma separated object members */
+ do
+ {
+ /* allocate next item */
+ cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
+ if (new_item == NULL)
+ {
+ goto fail; /* allocation failure */
+ }
+
+ /* attach next item to list */
+ if (head == NULL)
+ {
+ /* start the linked list */
+ current_item = head = new_item;
+ }
+ else
+ {
+ /* add to the end and advance */
+ current_item->next = new_item;
+ new_item->prev = current_item;
+ current_item = new_item;
+ }
+
+ /* parse the name of the child */
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (!parse_string(current_item, input_buffer))
+ {
+ goto fail; /* failed to parse name */
+ }
+ buffer_skip_whitespace(input_buffer);
+
+ /* swap valuestring and string, because we parsed the name */
+ current_item->string = current_item->valuestring;
+ current_item->valuestring = NULL;
+
+ if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':'))
+ {
+ goto fail; /* invalid object */
+ }
+
+ /* parse the value */
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (!parse_value(current_item, input_buffer))
+ {
+ goto fail; /* failed to parse value */
+ }
+ buffer_skip_whitespace(input_buffer);
+ }
+ while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ','));
+
+ if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}'))
+ {
+ goto fail; /* expected end of object */
+ }
+
+success:
+ input_buffer->depth--;
+
+ if (head != NULL) {
+ head->prev = current_item;
+ }
+
+ item->type = cJSON_Object;
+ item->child = head;
+
+ input_buffer->offset++;
+ return true;
+
+fail:
+ if (head != NULL)
+ {
+ cJSON_Delete(head);
+ }
+
+ return false;
+}
+
+/* Render an object to text. */
+static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer)
+{
+ unsigned char *output_pointer = NULL;
+ size_t length = 0;
+ cJSON *current_item = item->child;
+
+ if (output_buffer == NULL)
+ {
+ return false;
+ }
+
+ /* Compose the output: */
+ length = (size_t) (output_buffer->format ? 2 : 1); /* fmt: {\n */
+ output_pointer = ensure(output_buffer, length + 1);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+
+ *output_pointer++ = '{';
+ output_buffer->depth++;
+ if (output_buffer->format)
+ {
+ *output_pointer++ = '\n';
+ }
+ output_buffer->offset += length;
+
+ while (current_item)
+ {
+ if (output_buffer->format)
+ {
+ size_t i;
+ output_pointer = ensure(output_buffer, output_buffer->depth);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+ for (i = 0; i < output_buffer->depth; i++)
+ {
+ *output_pointer++ = '\t';
+ }
+ output_buffer->offset += output_buffer->depth;
+ }
+
+ /* print key */
+ if (!print_string_ptr((unsigned char*)current_item->string, output_buffer))
+ {
+ return false;
+ }
+ update_offset(output_buffer);
+
+ length = (size_t) (output_buffer->format ? 2 : 1);
+ output_pointer = ensure(output_buffer, length);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+ *output_pointer++ = ':';
+ if (output_buffer->format)
+ {
+ *output_pointer++ = '\t';
+ }
+ output_buffer->offset += length;
+
+ /* print value */
+ if (!print_value(current_item, output_buffer))
+ {
+ return false;
+ }
+ update_offset(output_buffer);
+
+ /* print comma if not last */
+ length = ((size_t)(output_buffer->format ? 1 : 0) + (size_t)(current_item->next ? 1 : 0));
+ output_pointer = ensure(output_buffer, length + 1);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+ if (current_item->next)
+ {
+ *output_pointer++ = ',';
+ }
+
+ if (output_buffer->format)
+ {
+ *output_pointer++ = '\n';
+ }
+ *output_pointer = '\0';
+ output_buffer->offset += length;
+
+ current_item = current_item->next;
+ }
+
+ output_pointer = ensure(output_buffer, output_buffer->format ? (output_buffer->depth + 1) : 2);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+ if (output_buffer->format)
+ {
+ size_t i;
+ for (i = 0; i < (output_buffer->depth - 1); i++)
+ {
+ *output_pointer++ = '\t';
+ }
+ }
+ *output_pointer++ = '}';
+ *output_pointer = '\0';
+ output_buffer->depth--;
+
+ return true;
+}
+
+/* Get array size, array item, or object item. */
+CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array)
+{
+ cJSON *child = NULL;
+ size_t size = 0;
+
+ if (array == NULL)
+ {
+ return 0;
+ }
+
+ child = array->child;
+
+ while(child != NULL)
+ {
+ size++;
+ child = child->next;
+ }
+
+ /* FIXME: Can overflow here. Cannot be fixed without breaking the API */
+
+ return (int)size;
+}
+
+static cJSON* get_array_item(const cJSON *array, size_t index)
+{
+ cJSON *current_child = NULL;
+
+ if (array == NULL)
+ {
+ return NULL;
+ }
+
+ current_child = array->child;
+ while ((current_child != NULL) && (index > 0))
+ {
+ index--;
+ current_child = current_child->next;
+ }
+
+ return current_child;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index)
+{
+ if (index < 0)
+ {
+ return NULL;
+ }
+
+ return get_array_item(array, (size_t)index);
+}
+
+static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive)
+{
+ cJSON *current_element = NULL;
+
+ if ((object == NULL) || (name == NULL))
+ {
+ return NULL;
+ }
+
+ current_element = object->child;
+ if (case_sensitive)
+ {
+ while ((current_element != NULL) && (current_element->string != NULL) && (strcmp(name, current_element->string) != 0))
+ {
+ current_element = current_element->next;
+ }
+ }
+ else
+ {
+ while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0))
+ {
+ current_element = current_element->next;
+ }
+ }
+
+ if ((current_element == NULL) || (current_element->string == NULL)) {
+ return NULL;
+ }
+
+ return current_element;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string)
+{
+ return get_object_item(object, string, false);
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string)
+{
+ return get_object_item(object, string, true);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string)
+{
+ return cJSON_GetObjectItem(object, string) ? 1 : 0;
+}
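+
+/*
+ * Usage sketch (illustrative comment, not compiled): looking up members and
+ * iterating an array with the accessors above. The JSON text is a made-up
+ * example, error handling is elided, and <stdio.h> is assumed for printf.
+ *
+ *     cJSON *root = cJSON_Parse("{\"ports\":[80,443]}");
+ *     cJSON *ports = cJSON_GetObjectItem(root, "ports");
+ *     int i;
+ *     for (i = 0; i < cJSON_GetArraySize(ports); i++)
+ *     {
+ *         cJSON *port = cJSON_GetArrayItem(ports, i);
+ *         if (cJSON_IsNumber(port)) { printf("%d\n", port->valueint); }
+ *     }
+ *     cJSON_Delete(root);
+ */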
+
+/* Utility for array list handling. */
+static void suffix_object(cJSON *prev, cJSON *item)
+{
+ prev->next = item;
+ item->prev = prev;
+}
+
+/* Utility for handling references. */
+static cJSON *create_reference(const cJSON *item, const internal_hooks * const hooks)
+{
+ cJSON *reference = NULL;
+ if (item == NULL)
+ {
+ return NULL;
+ }
+
+ reference = cJSON_New_Item(hooks);
+ if (reference == NULL)
+ {
+ return NULL;
+ }
+
+ memcpy(reference, item, sizeof(cJSON));
+ reference->string = NULL;
+ reference->type |= cJSON_IsReference;
+ reference->next = reference->prev = NULL;
+ return reference;
+}
+
+static cJSON_bool add_item_to_array(cJSON *array, cJSON *item)
+{
+ cJSON *child = NULL;
+
+ if ((item == NULL) || (array == NULL) || (array == item))
+ {
+ return false;
+ }
+
+ child = array->child;
+ /*
+ * To find the last item in the array quickly, we use the first child's
+ * prev pointer, which always points at the tail of the list.
+ */
+ if (child == NULL)
+ {
+ /* list is empty, start new one */
+ array->child = item;
+ item->prev = item;
+ item->next = NULL;
+ }
+ else
+ {
+ /* append to the end */
+ if (child->prev)
+ {
+ suffix_object(child->prev, item);
+ array->child->prev = item;
+ }
+ }
+
+ return true;
+}
+
+/* Add item to array/object. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item)
+{
+ return add_item_to_array(array, item);
+}
+
+#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))))
+ #pragma GCC diagnostic push
+#endif
+#ifdef __GNUC__
+ #if ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 1)))
+ #pragma GCC diagnostic ignored "-Wcast-qual"
+ #endif
+#endif
+/* helper function to cast away const */
+static void* cast_away_const(const void* string)
+{
+ return (void*)string;
+}
+#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))))
+ #pragma GCC diagnostic pop
+#endif
+
+
+static cJSON_bool add_item_to_object(cJSON * const object, const char * const string, cJSON * const item, const internal_hooks * const hooks, const cJSON_bool constant_key)
+{
+ char *new_key = NULL;
+ int new_type = cJSON_Invalid;
+
+ if ((object == NULL) || (string == NULL) || (item == NULL) || (object == item))
+ {
+ return false;
+ }
+
+ if (constant_key)
+ {
+ new_key = (char*)cast_away_const(string);
+ new_type = item->type | cJSON_StringIsConst;
+ }
+ else
+ {
+ new_key = (char*)cJSON_strdup((const unsigned char*)string, hooks);
+ if (new_key == NULL)
+ {
+ return false;
+ }
+
+ new_type = item->type & ~cJSON_StringIsConst;
+ }
+
+ if (!(item->type & cJSON_StringIsConst) && (item->string != NULL))
+ {
+ hooks->deallocate(item->string);
+ }
+
+ item->string = new_key;
+ item->type = new_type;
+
+ return add_item_to_array(object, item);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item)
+{
+ return add_item_to_object(object, string, item, &global_hooks, false);
+}
+
+/* Add an item to an object with constant string as key */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item)
+{
+ return add_item_to_object(object, string, item, &global_hooks, true);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item)
+{
+ if (array == NULL)
+ {
+ return false;
+ }
+
+ return add_item_to_array(array, create_reference(item, &global_hooks));
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item)
+{
+ if ((object == NULL) || (string == NULL))
+ {
+ return false;
+ }
+
+ return add_item_to_object(object, string, create_reference(item, &global_hooks), &global_hooks, false);
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name)
+{
+ cJSON *null = cJSON_CreateNull();
+ if (add_item_to_object(object, name, null, &global_hooks, false))
+ {
+ return null;
+ }
+
+ cJSON_Delete(null);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name)
+{
+ cJSON *true_item = cJSON_CreateTrue();
+ if (add_item_to_object(object, name, true_item, &global_hooks, false))
+ {
+ return true_item;
+ }
+
+ cJSON_Delete(true_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name)
+{
+ cJSON *false_item = cJSON_CreateFalse();
+ if (add_item_to_object(object, name, false_item, &global_hooks, false))
+ {
+ return false_item;
+ }
+
+ cJSON_Delete(false_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean)
+{
+ cJSON *bool_item = cJSON_CreateBool(boolean);
+ if (add_item_to_object(object, name, bool_item, &global_hooks, false))
+ {
+ return bool_item;
+ }
+
+ cJSON_Delete(bool_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number)
+{
+ cJSON *number_item = cJSON_CreateNumber(number);
+ if (add_item_to_object(object, name, number_item, &global_hooks, false))
+ {
+ return number_item;
+ }
+
+ cJSON_Delete(number_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string)
+{
+ cJSON *string_item = cJSON_CreateString(string);
+ if (add_item_to_object(object, name, string_item, &global_hooks, false))
+ {
+ return string_item;
+ }
+
+ cJSON_Delete(string_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw)
+{
+ cJSON *raw_item = cJSON_CreateRaw(raw);
+ if (add_item_to_object(object, name, raw_item, &global_hooks, false))
+ {
+ return raw_item;
+ }
+
+ cJSON_Delete(raw_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name)
+{
+ cJSON *object_item = cJSON_CreateObject();
+ if (add_item_to_object(object, name, object_item, &global_hooks, false))
+ {
+ return object_item;
+ }
+
+ cJSON_Delete(object_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name)
+{
+ cJSON *array = cJSON_CreateArray();
+ if (add_item_to_object(object, name, array, &global_hooks, false))
+ {
+ return array;
+ }
+
+ cJSON_Delete(array);
+ return NULL;
+}
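+
+/*
+ * Usage sketch (illustrative comment, not compiled): composing a document
+ * with the cJSON_Add*ToObject convenience wrappers above. Each wrapper
+ * returns the newly attached item, or NULL on failure; deleting the root
+ * frees the whole tree, including attached items.
+ *
+ *     cJSON *root = cJSON_CreateObject();
+ *     cJSON_AddStringToObject(root, "name", "example");
+ *     cJSON_AddNumberToObject(root, "retries", 3);
+ *     cJSON *tags = cJSON_AddArrayToObject(root, "tags");
+ *     cJSON_AddItemToArray(tags, cJSON_CreateString("demo"));
+ *     cJSON_Delete(root);
+ */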
+
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item)
+{
+ if ((parent == NULL) || (item == NULL))
+ {
+ return NULL;
+ }
+
+ if (item != parent->child)
+ {
+ /* not the first element */
+ item->prev->next = item->next;
+ }
+ if (item->next != NULL)
+ {
+ /* not the last element */
+ item->next->prev = item->prev;
+ }
+
+ if (item == parent->child)
+ {
+ /* first element */
+ parent->child = item->next;
+ }
+ else if (item->next == NULL)
+ {
+ /* last element */
+ parent->child->prev = item->prev;
+ }
+
+ /* make sure the detached item doesn't point anywhere anymore */
+ item->prev = NULL;
+ item->next = NULL;
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which)
+{
+ if (which < 0)
+ {
+ return NULL;
+ }
+
+ return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which));
+}
+
+CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which)
+{
+ cJSON_Delete(cJSON_DetachItemFromArray(array, which));
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string)
+{
+ cJSON *to_detach = cJSON_GetObjectItem(object, string);
+
+ return cJSON_DetachItemViaPointer(object, to_detach);
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string)
+{
+ cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string);
+
+ return cJSON_DetachItemViaPointer(object, to_detach);
+}
+
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string)
+{
+ cJSON_Delete(cJSON_DetachItemFromObject(object, string));
+}
+
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string)
+{
+ cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string));
+}
+
+/* Replace array/object items with new ones. */
+CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem)
+{
+ cJSON *after_inserted = NULL;
+
+ if (which < 0)
+ {
+ return false;
+ }
+
+ after_inserted = get_array_item(array, (size_t)which);
+ if (after_inserted == NULL)
+ {
+ return add_item_to_array(array, newitem);
+ }
+
+ newitem->next = after_inserted;
+ newitem->prev = after_inserted->prev;
+ after_inserted->prev = newitem;
+ if (after_inserted == array->child)
+ {
+ array->child = newitem;
+ }
+ else
+ {
+ newitem->prev->next = newitem;
+ }
+ return true;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement)
+{
+ if ((parent == NULL) || (replacement == NULL) || (item == NULL))
+ {
+ return false;
+ }
+
+ if (replacement == item)
+ {
+ return true;
+ }
+
+ replacement->next = item->next;
+ replacement->prev = item->prev;
+
+ if (replacement->next != NULL)
+ {
+ replacement->next->prev = replacement;
+ }
+ if (parent->child == item)
+ {
+ if (parent->child->prev == parent->child)
+ {
+ replacement->prev = replacement;
+ }
+ parent->child = replacement;
+ }
+ else
+ { /*
+ * To find the last item in the array quickly, the first child's prev pointer
+ * tracks the tail; the tail's next pointer stays NULL, so only
+ * parent->child->prev needs updating when the replacement becomes the tail.
+ */
+ if (replacement->prev != NULL)
+ {
+ replacement->prev->next = replacement;
+ }
+ if (replacement->next == NULL)
+ {
+ parent->child->prev = replacement;
+ }
+ }
+
+ item->next = NULL;
+ item->prev = NULL;
+ cJSON_Delete(item);
+
+ return true;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem)
+{
+ if (which < 0)
+ {
+ return false;
+ }
+
+ return cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem);
+}
+
+static cJSON_bool replace_item_in_object(cJSON *object, const char *string, cJSON *replacement, cJSON_bool case_sensitive)
+{
+ if ((replacement == NULL) || (string == NULL))
+ {
+ return false;
+ }
+
+ /* replace the name in the replacement */
+ if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL))
+ {
+ cJSON_free(replacement->string);
+ }
+ replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks);
+ replacement->type &= ~cJSON_StringIsConst;
+
+ return cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem)
+{
+ return replace_item_in_object(object, string, newitem, false);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem)
+{
+ return replace_item_in_object(object, string, newitem, true);
+}
+
+/* Create basic types: */
+CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = cJSON_NULL;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = cJSON_True;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = cJSON_False;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = boolean ? cJSON_True : cJSON_False;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = cJSON_Number;
+ item->valuedouble = num;
+
+ /* use saturation in case of overflow */
+ if (num >= INT_MAX)
+ {
+ item->valueint = INT_MAX;
+ }
+ else if (num <= (double)INT_MIN)
+ {
+ item->valueint = INT_MIN;
+ }
+ else
+ {
+ item->valueint = (int)num;
+ }
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = cJSON_String;
+ item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks);
+ if(!item->valuestring)
+ {
+ cJSON_Delete(item);
+ return NULL;
+ }
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item != NULL)
+ {
+ item->type = cJSON_String | cJSON_IsReference;
+ item->valuestring = (char*)cast_away_const(string);
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item != NULL) {
+ item->type = cJSON_Object | cJSON_IsReference;
+ item->child = (cJSON*)cast_away_const(child);
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item != NULL) {
+ item->type = cJSON_Array | cJSON_IsReference;
+ item->child = (cJSON*)cast_away_const(child);
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = cJSON_Raw;
+ item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks);
+ if(!item->valuestring)
+ {
+ cJSON_Delete(item);
+ return NULL;
+ }
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type=cJSON_Array;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item)
+ {
+ item->type = cJSON_Object;
+ }
+
+ return item;
+}
+
+/* Create Arrays: */
+CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count)
+{
+ size_t i = 0;
+ cJSON *n = NULL;
+ cJSON *p = NULL;
+ cJSON *a = NULL;
+
+ if ((count < 0) || (numbers == NULL))
+ {
+ return NULL;
+ }
+
+ a = cJSON_CreateArray();
+
+ for(i = 0; a && (i < (size_t)count); i++)
+ {
+ n = cJSON_CreateNumber(numbers[i]);
+ if (!n)
+ {
+ cJSON_Delete(a);
+ return NULL;
+ }
+ if(!i)
+ {
+ a->child = n;
+ }
+ else
+ {
+ suffix_object(p, n);
+ }
+ p = n;
+ }
+
+ if (a && a->child) {
+ a->child->prev = n;
+ }
+
+ return a;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count)
+{
+ size_t i = 0;
+ cJSON *n = NULL;
+ cJSON *p = NULL;
+ cJSON *a = NULL;
+
+ if ((count < 0) || (numbers == NULL))
+ {
+ return NULL;
+ }
+
+ a = cJSON_CreateArray();
+
+ for(i = 0; a && (i < (size_t)count); i++)
+ {
+ n = cJSON_CreateNumber((double)numbers[i]);
+ if(!n)
+ {
+ cJSON_Delete(a);
+ return NULL;
+ }
+ if(!i)
+ {
+ a->child = n;
+ }
+ else
+ {
+ suffix_object(p, n);
+ }
+ p = n;
+ }
+
+ if (a && a->child) {
+ a->child->prev = n;
+ }
+
+ return a;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count)
+{
+ size_t i = 0;
+ cJSON *n = NULL;
+ cJSON *p = NULL;
+ cJSON *a = NULL;
+
+ if ((count < 0) || (numbers == NULL))
+ {
+ return NULL;
+ }
+
+ a = cJSON_CreateArray();
+
+ for(i = 0; a && (i < (size_t)count); i++)
+ {
+ n = cJSON_CreateNumber(numbers[i]);
+ if(!n)
+ {
+ cJSON_Delete(a);
+ return NULL;
+ }
+ if(!i)
+ {
+ a->child = n;
+ }
+ else
+ {
+ suffix_object(p, n);
+ }
+ p = n;
+ }
+
+ if (a && a->child) {
+ a->child->prev = n;
+ }
+
+ return a;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count)
+{
+ size_t i = 0;
+ cJSON *n = NULL;
+ cJSON *p = NULL;
+ cJSON *a = NULL;
+
+ if ((count < 0) || (strings == NULL))
+ {
+ return NULL;
+ }
+
+ a = cJSON_CreateArray();
+
+ for (i = 0; a && (i < (size_t)count); i++)
+ {
+ n = cJSON_CreateString(strings[i]);
+ if(!n)
+ {
+ cJSON_Delete(a);
+ return NULL;
+ }
+ if(!i)
+ {
+ a->child = n;
+ }
+ else
+ {
+ suffix_object(p,n);
+ }
+ p = n;
+ }
+
+ if (a && a->child) {
+ a->child->prev = n;
+ }
+
+ return a;
+}
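+
+/*
+ * Usage sketch (illustrative comment, not compiled): the typed array
+ * constructors above copy the caller's buffer into a new cJSON array.
+ *
+ *     int fib[5] = {1, 1, 2, 3, 5};
+ *     cJSON *arr = cJSON_CreateIntArray(fib, 5);  (yields [1,1,2,3,5])
+ *     cJSON_Delete(arr);
+ */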
+
+/* Duplication */
+CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse)
+{
+ cJSON *newitem = NULL;
+ cJSON *child = NULL;
+ cJSON *next = NULL;
+ cJSON *newchild = NULL;
+
+ /* Bail on bad ptr */
+ if (!item)
+ {
+ goto fail;
+ }
+ /* Create new item */
+ newitem = cJSON_New_Item(&global_hooks);
+ if (!newitem)
+ {
+ goto fail;
+ }
+ /* Copy over all vars */
+ newitem->type = item->type & (~cJSON_IsReference);
+ newitem->valueint = item->valueint;
+ newitem->valuedouble = item->valuedouble;
+ if (item->valuestring)
+ {
+ newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks);
+ if (!newitem->valuestring)
+ {
+ goto fail;
+ }
+ }
+ if (item->string)
+ {
+ newitem->string = (item->type&cJSON_StringIsConst) ? item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks);
+ if (!newitem->string)
+ {
+ goto fail;
+ }
+ }
+ /* If non-recursive, then we're done! */
+ if (!recurse)
+ {
+ return newitem;
+ }
+ /* Walk the ->next chain for the child. */
+ child = item->child;
+ while (child != NULL)
+ {
+ newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */
+ if (!newchild)
+ {
+ goto fail;
+ }
+ if (next != NULL)
+ {
+ /* If newitem->child already set, then crosswire ->prev and ->next and move on */
+ next->next = newchild;
+ newchild->prev = next;
+ next = newchild;
+ }
+ else
+ {
+ /* Set newitem->child and move to it */
+ newitem->child = newchild;
+ next = newchild;
+ }
+ child = child->next;
+ }
+ if (newitem && newitem->child)
+ {
+ newitem->child->prev = newchild;
+ }
+
+ return newitem;
+
+fail:
+ if (newitem != NULL)
+ {
+ cJSON_Delete(newitem);
+ }
+
+ return NULL;
+}
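+
+/*
+ * Usage sketch (illustrative comment, not compiled; "original" and
+ * "other_root" are hypothetical variables): cJSON_Duplicate with
+ * recurse == true deep-copies an item, so the copy can be attached to a
+ * different tree and freed independently of the source.
+ *
+ *     cJSON *copy = cJSON_Duplicate(original, true);
+ *     cJSON_AddItemToObject(other_root, "copy", copy);
+ */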
+
+static void skip_oneline_comment(char **input)
+{
+ *input += static_strlen("//");
+
+ for (; (*input)[0] != '\0'; ++(*input))
+ {
+ if ((*input)[0] == '\n') {
+ *input += static_strlen("\n");
+ return;
+ }
+ }
+}
+
+static void skip_multiline_comment(char **input)
+{
+ *input += static_strlen("/*");
+
+ for (; (*input)[0] != '\0'; ++(*input))
+ {
+ if (((*input)[0] == '*') && ((*input)[1] == '/'))
+ {
+ *input += static_strlen("*/");
+ return;
+ }
+ }
+}
+
+static void minify_string(char **input, char **output) {
+ (*output)[0] = (*input)[0];
+ *input += static_strlen("\"");
+ *output += static_strlen("\"");
+
+ for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) {
+ (*output)[0] = (*input)[0];
+
+ if ((*input)[0] == '\"') {
+ (*output)[0] = '\"';
+ *input += static_strlen("\"");
+ *output += static_strlen("\"");
+ return;
+ } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) {
+ (*output)[1] = (*input)[1];
+ *input += static_strlen("\"");
+ *output += static_strlen("\"");
+ }
+ }
+}
+
+CJSON_PUBLIC(void) cJSON_Minify(char *json)
+{
+ char *into = json;
+
+ if (json == NULL)
+ {
+ return;
+ }
+
+ while (json[0] != '\0')
+ {
+ switch (json[0])
+ {
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ json++;
+ break;
+
+ case '/':
+ if (json[1] == '/')
+ {
+ skip_oneline_comment(&json);
+ }
+ else if (json[1] == '*')
+ {
+ skip_multiline_comment(&json);
+ } else {
+ json++;
+ }
+ break;
+
+ case '\"':
+ minify_string(&json, (char**)&into);
+ break;
+
+ default:
+ into[0] = json[0];
+ json++;
+ into++;
+ }
+ }
+
+ /* and null-terminate. */
+ *into = '\0';
+}
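+
+/*
+ * Usage sketch (illustrative comment, not compiled): cJSON_Minify strips
+ * whitespace and comments in place, so it must be given a writable buffer.
+ *
+ *     char text[] = "{ \"a\": 1 }";
+ *     cJSON_Minify(text);  (text now holds "{\"a\":1}")
+ */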
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Invalid;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_False;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_True;
+}
+
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & (cJSON_True | cJSON_False)) != 0;
+}
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_NULL;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Number;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_String;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Array;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Object;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Raw;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive)
+{
+ if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF)))
+ {
+ return false;
+ }
+
+ /* check if type is valid */
+ switch (a->type & 0xFF)
+ {
+ case cJSON_False:
+ case cJSON_True:
+ case cJSON_NULL:
+ case cJSON_Number:
+ case cJSON_String:
+ case cJSON_Raw:
+ case cJSON_Array:
+ case cJSON_Object:
+ break;
+
+ default:
+ return false;
+ }
+
+ /* identical objects are equal */
+ if (a == b)
+ {
+ return true;
+ }
+
+ switch (a->type & 0xFF)
+ {
+ /* in these cases an equal type is enough */
+ case cJSON_False:
+ case cJSON_True:
+ case cJSON_NULL:
+ return true;
+
+ case cJSON_Number:
+ if (compare_double(a->valuedouble, b->valuedouble))
+ {
+ return true;
+ }
+ return false;
+
+ case cJSON_String:
+ case cJSON_Raw:
+ if ((a->valuestring == NULL) || (b->valuestring == NULL))
+ {
+ return false;
+ }
+ if (strcmp(a->valuestring, b->valuestring) == 0)
+ {
+ return true;
+ }
+
+ return false;
+
+ case cJSON_Array:
+ {
+ cJSON *a_element = a->child;
+ cJSON *b_element = b->child;
+
+ for (; (a_element != NULL) && (b_element != NULL);)
+ {
+ if (!cJSON_Compare(a_element, b_element, case_sensitive))
+ {
+ return false;
+ }
+
+ a_element = a_element->next;
+ b_element = b_element->next;
+ }
+
+ /* one of the arrays is longer than the other */
+ if (a_element != b_element) {
+ return false;
+ }
+
+ return true;
+ }
+
+ case cJSON_Object:
+ {
+ cJSON *a_element = NULL;
+ cJSON *b_element = NULL;
+ cJSON_ArrayForEach(a_element, a)
+ {
+ /* TODO This has O(n^2) runtime, which is horrible! */
+ b_element = get_object_item(b, a_element->string, case_sensitive);
+ if (b_element == NULL)
+ {
+ return false;
+ }
+
+ if (!cJSON_Compare(a_element, b_element, case_sensitive))
+ {
+ return false;
+ }
+ }
+
+ /* compare in both directions (a against b, then b against a) so that a
+ * being a strict subset of b does not compare as equal
+ * TODO: Do this the proper way, this is just a fix for now */
+ cJSON_ArrayForEach(b_element, b)
+ {
+ a_element = get_object_item(a, b_element->string, case_sensitive);
+ if (a_element == NULL)
+ {
+ return false;
+ }
+
+ if (!cJSON_Compare(b_element, a_element, case_sensitive))
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
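+
+/*
+ * Usage sketch (illustrative comment, not compiled): cJSON_Compare performs
+ * a deep comparison that ignores member order in objects.
+ *
+ *     cJSON *a = cJSON_Parse("{\"x\":1,\"y\":2}");
+ *     cJSON *b = cJSON_Parse("{\"y\":2,\"x\":1}");
+ *     cJSON_Compare(a, b, true);  (true despite differing member order)
+ *     cJSON_Delete(a);
+ *     cJSON_Delete(b);
+ */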
+
+CJSON_PUBLIC(void *) cJSON_malloc(size_t size)
+{
+ return global_hooks.allocate(size);
+}
+
+CJSON_PUBLIC(void) cJSON_free(void *object)
+{
+ global_hooks.deallocate(object);
+}
diff --git a/contrib/restricted/aws/aws-c-common/source/file.c b/contrib/restricted/aws/aws-c-common/source/file.c
new file mode 100644
index 0000000000..a64453fd23
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/file.c
@@ -0,0 +1,171 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/file.h>
+#include <aws/common/linked_list.h>
+#include <aws/common/logging.h>
+#include <aws/common/string.h>
+
+#include <errno.h>
+
+FILE *aws_fopen(const char *file_path, const char *mode) {
+ struct aws_string *file_path_str = aws_string_new_from_c_str(aws_default_allocator(), file_path);
+ struct aws_string *mode_str = aws_string_new_from_c_str(aws_default_allocator(), mode);
+
+ FILE *file = aws_fopen_safe(file_path_str, mode_str);
+ aws_string_destroy(mode_str);
+ aws_string_destroy(file_path_str);
+
+ return file;
+}
+
+int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename) {
+ AWS_ZERO_STRUCT(*out_buf);
+ FILE *fp = aws_fopen(filename, "rb");
+
+ if (fp) {
+ if (fseek(fp, 0L, SEEK_END)) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno);
+ fclose(fp);
+ return aws_translate_and_raise_io_error(errno);
+ }
+
+ size_t allocation_size = (size_t)ftell(fp) + 1;
+ /* Tell the user that we allocate here and if success they're responsible for the free. */
+ if (aws_byte_buf_init(out_buf, alloc, allocation_size)) {
+ fclose(fp);
+ return AWS_OP_ERR;
+ }
+
+ /* Ensure compatibility with null-terminated APIs, but don't consider
+ * the null terminator part of the length of the payload */
+ out_buf->len = out_buf->capacity - 1;
+ out_buf->buffer[out_buf->len] = 0;
+
+ if (fseek(fp, 0L, SEEK_SET)) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno);
+ aws_byte_buf_clean_up(out_buf);
+ fclose(fp);
+ return aws_translate_and_raise_io_error(errno);
+ }
+
+ size_t read = fread(out_buf->buffer, 1, out_buf->len, fp);
+ fclose(fp);
+ if (read < out_buf->len) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to read file %s with errno %d", filename, errno);
+ aws_secure_zero(out_buf->buffer, out_buf->len);
+ aws_byte_buf_clean_up(out_buf);
+ return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
+ }
+
+ return AWS_OP_SUCCESS;
+ }
+
+ AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file %s with errno %d", filename, errno);
+
+ return aws_translate_and_raise_io_error(errno);
+}
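+
+/*
+ * Usage sketch (illustrative only; "config.json" is a made-up path and an
+ * initialized struct aws_allocator *allocator is assumed): on success the
+ * caller owns the buffer and must clean it up.
+ *
+ *     struct aws_byte_buf contents;
+ *     if (aws_byte_buf_init_from_file(&contents, allocator, "config.json") == AWS_OP_SUCCESS) {
+ *         (use contents.buffer / contents.len)
+ *         aws_byte_buf_clean_up(&contents);
+ *     }
+ */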
+
+bool aws_is_any_directory_separator(char value) {
+ return value == '\\' || value == '/';
+}
+
+struct aws_directory_iterator {
+ struct aws_linked_list list_data;
+ struct aws_allocator *allocator;
+ struct aws_linked_list_node *current_node;
+};
+
+struct directory_entry_value {
+ struct aws_directory_entry entry;
+ struct aws_byte_buf path;
+ struct aws_byte_buf relative_path;
+ struct aws_linked_list_node node;
+};
+
+static bool s_directory_iterator_directory_entry(const struct aws_directory_entry *entry, void *user_data) {
+ struct aws_directory_iterator *iterator = user_data;
+ struct directory_entry_value *value = aws_mem_calloc(iterator->allocator, 1, sizeof(struct directory_entry_value));
+
+ value->entry = *entry;
+ aws_byte_buf_init_copy_from_cursor(&value->path, iterator->allocator, entry->path);
+ value->entry.path = aws_byte_cursor_from_buf(&value->path);
+ aws_byte_buf_init_copy_from_cursor(&value->relative_path, iterator->allocator, entry->relative_path);
+ value->entry.relative_path = aws_byte_cursor_from_buf(&value->relative_path);
+ aws_linked_list_push_back(&iterator->list_data, &value->node);
+
+ return true;
+}
+
+struct aws_directory_iterator *aws_directory_entry_iterator_new(
+ struct aws_allocator *allocator,
+ const struct aws_string *path) {
+ struct aws_directory_iterator *iterator = aws_mem_acquire(allocator, sizeof(struct aws_directory_iterator));
+ iterator->allocator = allocator;
+ aws_linked_list_init(&iterator->list_data);
+
+ /* the whole point of this iterator is to avoid recursion, so let's do that by passing recurse as false. */
+ if (AWS_OP_SUCCESS ==
+ aws_directory_traverse(allocator, path, false, s_directory_iterator_directory_entry, iterator)) {
+ if (!aws_linked_list_empty(&iterator->list_data)) {
+ iterator->current_node = aws_linked_list_front(&iterator->list_data);
+ }
+ return iterator;
+ }
+
+ aws_mem_release(allocator, iterator);
+ return NULL;
+}
+
+int aws_directory_entry_iterator_next(struct aws_directory_iterator *iterator) {
+ struct aws_linked_list_node *node = iterator->current_node;
+
+ if (!node || node->next == aws_linked_list_end(&iterator->list_data)) {
+ return aws_raise_error(AWS_ERROR_LIST_EMPTY);
+ }
+
+ iterator->current_node = aws_linked_list_next(node);
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_directory_entry_iterator_previous(struct aws_directory_iterator *iterator) {
+ struct aws_linked_list_node *node = iterator->current_node;
+
+ if (!node || node == aws_linked_list_begin(&iterator->list_data)) {
+ return aws_raise_error(AWS_ERROR_LIST_EMPTY);
+ }
+
+ iterator->current_node = aws_linked_list_prev(node);
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_directory_entry_iterator_destroy(struct aws_directory_iterator *iterator) {
+ while (!aws_linked_list_empty(&iterator->list_data)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&iterator->list_data);
+ struct directory_entry_value *value = AWS_CONTAINER_OF(node, struct directory_entry_value, node);
+
+ aws_byte_buf_clean_up(&value->path);
+ aws_byte_buf_clean_up(&value->relative_path);
+
+ aws_mem_release(iterator->allocator, value);
+ }
+
+ aws_mem_release(iterator->allocator, iterator);
+}
+
+const struct aws_directory_entry *aws_directory_entry_iterator_get_value(
+ const struct aws_directory_iterator *iterator) {
+ struct aws_linked_list_node *node = iterator->current_node;
+
+ if (!iterator->current_node) {
+ return NULL;
+ }
+
+ struct directory_entry_value *value = AWS_CONTAINER_OF(node, struct directory_entry_value, node);
+ return &value->entry;
+}
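+
+/*
+ * Usage sketch (illustrative only; "allocator" and "path" are assumed to be
+ * initialized, error handling elided): walking a single directory level with
+ * the iterator above.
+ *
+ *     struct aws_directory_iterator *it = aws_directory_entry_iterator_new(allocator, path);
+ *     if (it != NULL) {
+ *         do {
+ *             const struct aws_directory_entry *entry = aws_directory_entry_iterator_get_value(it);
+ *             (inspect entry->path / entry->file_type)
+ *         } while (aws_directory_entry_iterator_next(it) == AWS_OP_SUCCESS);
+ *         aws_directory_entry_iterator_destroy(it);
+ *     }
+ */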
diff --git a/contrib/restricted/aws/aws-c-common/source/hash_table.c b/contrib/restricted/aws/aws-c-common/source/hash_table.c
index a8125a2df1..88926e48f9 100644
--- a/contrib/restricted/aws/aws-c-common/source/hash_table.c
+++ b/contrib/restricted/aws/aws-c-common/source/hash_table.c
@@ -222,7 +222,6 @@ int aws_hash_table_init(
AWS_PRECONDITION(alloc != NULL);
AWS_PRECONDITION(hash_fn != NULL);
AWS_PRECONDITION(equals_fn != NULL);
-
struct hash_table_state template;
template.hash_fn = hash_fn;
template.equals_fn = equals_fn;
@@ -715,6 +714,13 @@ int aws_hash_table_foreach(
for (struct aws_hash_iter iter = aws_hash_iter_begin(map); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) {
int rv = callback(context, &iter.element);
+ if (rv & AWS_COMMON_HASH_TABLE_ITER_ERROR) {
+ int error = aws_last_error();
+ if (error == AWS_ERROR_SUCCESS) {
+ aws_raise_error(AWS_ERROR_UNKNOWN);
+ }
+ return AWS_OP_ERR;
+ }
if (rv & AWS_COMMON_HASH_TABLE_ITER_DELETE) {
aws_hash_iter_delete(&iter, false);
@@ -1000,7 +1006,7 @@ bool aws_hash_callback_string_eq(const void *a, const void *b) {
AWS_PRECONDITION(aws_string_is_valid(a));
AWS_PRECONDITION(aws_string_is_valid(b));
bool rval = aws_string_eq(a, b);
- AWS_RETURN_WITH_POSTCONDITION(rval, aws_c_string_is_valid(a) && aws_c_string_is_valid(b));
+ AWS_RETURN_WITH_POSTCONDITION(rval, aws_string_is_valid(a) && aws_string_is_valid(b));
}
void aws_hash_callback_string_destroy(void *a) {
diff --git a/contrib/restricted/aws/aws-c-common/source/json.c b/contrib/restricted/aws/aws-c-common/source/json.c
new file mode 100644
index 0000000000..0f1b810be5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/json.c
@@ -0,0 +1,344 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/string.h>
+
+#include <aws/common/json.h>
+
+#include <aws/common/external/cJSON.h>
+
+static struct aws_allocator *s_aws_json_module_allocator = NULL;
+static bool s_aws_json_module_initialized = false;
+
+struct aws_json_value *aws_json_value_new_string(struct aws_allocator *allocator, struct aws_byte_cursor string) {
+ struct aws_string *tmp = aws_string_new_from_cursor((struct aws_allocator *)allocator, &string);
+ void *ret_val = cJSON_CreateString(aws_string_c_str(tmp));
+ aws_string_destroy_secure(tmp);
+ return ret_val;
+}
+
+struct aws_json_value *aws_json_value_new_number(struct aws_allocator *allocator, double number) {
+ (void)allocator; // prevent warnings over unused parameter
+ return (void *)cJSON_CreateNumber(number);
+}
+
+struct aws_json_value *aws_json_value_new_array(struct aws_allocator *allocator) {
+ (void)allocator; // prevent warnings over unused parameter
+ return (void *)cJSON_CreateArray();
+}
+
+struct aws_json_value *aws_json_value_new_boolean(struct aws_allocator *allocator, bool boolean) {
+ (void)allocator; // prevent warnings over unused parameter
+ return (void *)cJSON_CreateBool(boolean);
+}
+
+struct aws_json_value *aws_json_value_new_null(struct aws_allocator *allocator) {
+ (void)allocator; // prevent warnings over unused parameter
+ return (void *)cJSON_CreateNull();
+}
+
+struct aws_json_value *aws_json_value_new_object(struct aws_allocator *allocator) {
+ (void)allocator; // prevent warnings over unused parameter
+ return (void *)cJSON_CreateObject();
+}
+
+int aws_json_value_get_string(const struct aws_json_value *value, struct aws_byte_cursor *output) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (!cJSON_IsString(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *output = aws_byte_cursor_from_c_str(cJSON_GetStringValue(cjson));
+ return AWS_OP_SUCCESS;
+}
+
+int aws_json_value_get_number(const struct aws_json_value *value, double *output) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (!cJSON_IsNumber(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *output = cjson->valuedouble;
+ return AWS_OP_SUCCESS;
+}
+
+int aws_json_value_get_boolean(const struct aws_json_value *value, bool *output) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (!cJSON_IsBool(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *output = cjson->type == cJSON_True;
+ return AWS_OP_SUCCESS;
+}
+
+int aws_json_value_add_to_object(
+ struct aws_json_value *object,
+ struct aws_byte_cursor key,
+ struct aws_json_value *value) {
+
+ int result = AWS_OP_ERR;
+ struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key);
+
+ struct cJSON *cjson = (struct cJSON *)object;
+ if (!cJSON_IsObject(cjson)) {
+ result = aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto done;
+ }
+
+ struct cJSON *cjson_value = (struct cJSON *)value;
+ if (cJSON_IsInvalid(cjson_value)) {
+ result = aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto done;
+ }
+ if (cJSON_HasObjectItem(cjson, aws_string_c_str(tmp))) {
+ goto done;
+ }
+
+ cJSON_AddItemToObject(cjson, aws_string_c_str(tmp), cjson_value);
+ result = AWS_OP_SUCCESS;
+
+done:
+ aws_string_destroy_secure(tmp);
+ return result;
+}
+
+struct aws_json_value *aws_json_value_get_from_object(const struct aws_json_value *object, struct aws_byte_cursor key) {
+
+ void *return_value = NULL;
+ struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key);
+
+ struct cJSON *cjson = (struct cJSON *)object;
+ if (!cJSON_IsObject(cjson)) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto done;
+ }
+ if (!cJSON_HasObjectItem(cjson, aws_string_c_str(tmp))) {
+ goto done;
+ }
+
+ return_value = (void *)cJSON_GetObjectItem(cjson, aws_string_c_str(tmp));
+
+done:
+ aws_string_destroy_secure(tmp);
+ return return_value;
+}
+
+bool aws_json_value_has_key(const struct aws_json_value *object, struct aws_byte_cursor key) {
+
+ struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key);
+ bool result = false;
+
+ struct cJSON *cjson = (struct cJSON *)object;
+ if (!cJSON_IsObject(cjson)) {
+ goto done;
+ }
+ if (!cJSON_HasObjectItem(cjson, aws_string_c_str(tmp))) {
+ goto done;
+ }
+ result = true;
+
+done:
+ aws_string_destroy_secure(tmp);
+ return result;
+}
+
+int aws_json_value_remove_from_object(struct aws_json_value *object, struct aws_byte_cursor key) {
+
+ int result = AWS_OP_ERR;
+ struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key);
+
+ struct cJSON *cjson = (struct cJSON *)object;
+ if (!cJSON_IsObject(cjson)) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto done;
+ }
+ if (!cJSON_HasObjectItem(cjson, aws_string_c_str(tmp))) {
+ goto done;
+ }
+
+ cJSON_DeleteItemFromObject(cjson, aws_string_c_str(tmp));
+ result = AWS_OP_SUCCESS;
+
+done:
+ aws_string_destroy_secure(tmp);
+ return result;
+}
+
+int aws_json_value_add_array_element(struct aws_json_value *array, const struct aws_json_value *value) {
+
+ struct cJSON *cjson = (struct cJSON *)array;
+ if (!cJSON_IsArray(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct cJSON *cjson_value = (struct cJSON *)value;
+ if (cJSON_IsInvalid(cjson_value)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ cJSON_AddItemToArray(cjson, cjson_value);
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_json_value *aws_json_get_array_element(const struct aws_json_value *array, size_t index) {
+
+ struct cJSON *cjson = (struct cJSON *)array;
+ if (!cJSON_IsArray(cjson)) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (index >= (size_t)cJSON_GetArraySize(cjson)) {
+ aws_raise_error(AWS_ERROR_INVALID_INDEX);
+ return NULL;
+ }
+
+ return (void *)cJSON_GetArrayItem(cjson, (int)index);
+}
+
+size_t aws_json_get_array_size(const struct aws_json_value *array) {
+ struct cJSON *cjson = (struct cJSON *)array;
+ if (!cJSON_IsArray(cjson)) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return 0;
+ }
+ return cJSON_GetArraySize(cjson);
+}
+
+int aws_json_value_remove_array_element(struct aws_json_value *array, size_t index) {
+
+ struct cJSON *cjson = (struct cJSON *)array;
+ if (!cJSON_IsArray(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (index >= (size_t)cJSON_GetArraySize(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_INDEX);
+ }
+
+ cJSON_DeleteItemFromArray(cjson, (int)index);
+ return AWS_OP_SUCCESS;
+}
+
+bool aws_json_value_is_string(const struct aws_json_value *value) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (cJSON_IsInvalid(cjson)) {
+ return false;
+ }
+ return cJSON_IsString(cjson);
+}
+
+bool aws_json_value_is_number(const struct aws_json_value *value) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (cJSON_IsInvalid(cjson)) {
+ return false;
+ }
+ return cJSON_IsNumber(cjson);
+}
+
+bool aws_json_value_is_array(const struct aws_json_value *value) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (cJSON_IsInvalid(cjson)) {
+ return false;
+ }
+ return cJSON_IsArray(cjson);
+}
+
+bool aws_json_value_is_boolean(const struct aws_json_value *value) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (cJSON_IsInvalid(cjson)) {
+ return false;
+ }
+ return cJSON_IsBool(cjson);
+}
+
+bool aws_json_value_is_null(const struct aws_json_value *value) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (cJSON_IsInvalid(cjson)) {
+ return false;
+ }
+ return cJSON_IsNull(cjson);
+}
+
+bool aws_json_value_is_object(const struct aws_json_value *value) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (cJSON_IsInvalid(cjson)) {
+ return false;
+ }
+ return cJSON_IsObject(cjson);
+}
+
+static void *s_aws_cJSON_alloc(size_t sz) {
+ return aws_mem_acquire(s_aws_json_module_allocator, sz);
+}
+
+static void s_aws_cJSON_free(void *ptr) {
+ aws_mem_release(s_aws_json_module_allocator, ptr);
+}
+
+void aws_json_module_init(struct aws_allocator *allocator) {
+ if (!s_aws_json_module_initialized) {
+ s_aws_json_module_allocator = allocator;
+ struct cJSON_Hooks allocation_hooks = {.malloc_fn = s_aws_cJSON_alloc, .free_fn = s_aws_cJSON_free};
+ cJSON_InitHooks(&allocation_hooks);
+ s_aws_json_module_initialized = true;
+ }
+}
+
+void aws_json_module_cleanup(void) {
+ if (s_aws_json_module_initialized) {
+ s_aws_json_module_allocator = NULL;
+ s_aws_json_module_initialized = false;
+ }
+}
+
+void aws_json_value_destroy(struct aws_json_value *value) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (!cJSON_IsInvalid(cjson)) {
+ cJSON_Delete(cjson);
+ }
+}
+
+int aws_byte_buf_append_json_string(const struct aws_json_value *value, struct aws_byte_buf *output) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (cJSON_IsInvalid(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ char *tmp = cJSON_PrintUnformatted(cjson);
+ if (tmp == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ // Append the text to the byte buffer
+ struct aws_byte_cursor tmp_cursor = aws_byte_cursor_from_c_str(tmp);
+ int return_val = aws_byte_buf_append_dynamic_secure(output, &tmp_cursor);
+ s_aws_cJSON_free(tmp); // free the char* now that we do not need it
+ return return_val;
+}
+
+int aws_byte_buf_append_json_string_formatted(const struct aws_json_value *value, struct aws_byte_buf *output) {
+ struct cJSON *cjson = (struct cJSON *)value;
+ if (cJSON_IsInvalid(cjson)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ char *tmp = cJSON_Print(cjson);
+ if (tmp == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ // Append the text to the byte buffer
+ struct aws_byte_cursor tmp_cursor = aws_byte_cursor_from_c_str(tmp);
+ int return_val = aws_byte_buf_append_dynamic_secure(output, &tmp_cursor);
+ s_aws_cJSON_free(tmp); // free the char* now that we do not need it
+ return return_val;
+}
+
+struct aws_json_value *aws_json_value_new_from_string(struct aws_allocator *allocator, struct aws_byte_cursor string) {
+ struct aws_string *tmp = aws_string_new_from_cursor((struct aws_allocator *)allocator, &string);
+ struct cJSON *cjson = cJSON_Parse(aws_string_c_str(tmp));
+ aws_string_destroy_secure(tmp);
+ return (void *)cjson;
+}
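+
+/*
+ * Usage sketch (illustrative only; the payload is a made-up example, error
+ * handling is elided, and "allocator" is assumed): aws_json_module_init()
+ * must install the allocator before any of the wrappers above are used.
+ *
+ *     aws_json_module_init(allocator);
+ *     struct aws_json_value *doc =
+ *         aws_json_value_new_from_string(allocator, aws_byte_cursor_from_c_str("{\"id\":42}"));
+ *     double id = 0;
+ *     aws_json_value_get_number(
+ *         aws_json_value_get_from_object(doc, aws_byte_cursor_from_c_str("id")), &id);
+ *     aws_json_value_destroy(doc);
+ *     aws_json_module_cleanup();
+ */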
diff --git a/contrib/restricted/aws/aws-c-common/source/log_writer.c b/contrib/restricted/aws/aws-c-common/source/log_writer.c
index 7b31e406d1..6eea2fc3c5 100644
--- a/contrib/restricted/aws/aws-c-common/source/log_writer.c
+++ b/contrib/restricted/aws/aws-c-common/source/log_writer.c
@@ -3,6 +3,7 @@
* SPDX-License-Identifier: Apache-2.0.
*/
+#include <aws/common/file.h>
#include <aws/common/log_writer.h>
#include <aws/common/string.h>
@@ -10,10 +11,6 @@
#include <errno.h>
#include <stdio.h>
-#ifdef _MSC_VER
-# pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */
-#endif /* _MSC_VER */
-
/*
* Basic log writer implementations - stdout, stderr, arbitrary file
*/
@@ -76,7 +73,7 @@ static int s_aws_file_writer_init_internal(
/* Open file if name passed in */
if (file_name_to_open != NULL) {
- impl->log_file = fopen(file_name_to_open, "a+");
+ impl->log_file = aws_fopen(file_name_to_open, "a+");
if (impl->log_file == NULL) {
aws_mem_release(allocator, impl);
return aws_translate_and_raise_io_error(errno);
diff --git a/contrib/restricted/aws/aws-c-common/source/logging.c b/contrib/restricted/aws/aws-c-common/source/logging.c
index 1b96e1cc6b..d7f0910da3 100644
--- a/contrib/restricted/aws/aws-c-common/source/logging.c
+++ b/contrib/restricted/aws/aws-c-common/source/logging.c
@@ -5,19 +5,18 @@
#include <aws/common/logging.h>
-#include <aws/common/string.h>
-
+#include <aws/common/file.h>
#include <aws/common/log_channel.h>
#include <aws/common/log_formatter.h>
#include <aws/common/log_writer.h>
#include <aws/common/mutex.h>
+#include <aws/common/string.h>
#include <errno.h>
#include <stdarg.h>
#if _MSC_VER
# pragma warning(disable : 4204) /* non-constant aggregate initializer */
-# pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */
#endif
/*
@@ -55,7 +54,11 @@ static struct aws_logger_vtable s_null_vtable = {
.clean_up = s_null_logger_clean_up,
};
-static struct aws_logger s_null_logger = {.vtable = &s_null_vtable, .allocator = NULL, .p_impl = NULL};
+static struct aws_logger s_null_logger = {
+ .vtable = &s_null_vtable,
+ .allocator = NULL,
+ .p_impl = NULL,
+};
/*
* Pipeline logger implementation
@@ -120,13 +123,22 @@ static enum aws_log_level s_aws_logger_pipeline_get_log_level(struct aws_logger
struct aws_logger_pipeline *impl = logger->p_impl;
- return impl->level;
+ return (enum aws_log_level)aws_atomic_load_int(&impl->level);
+}
+
+static int s_aws_logger_pipeline_set_log_level(struct aws_logger *logger, enum aws_log_level level) {
+ struct aws_logger_pipeline *impl = logger->p_impl;
+
+ aws_atomic_store_int(&impl->level, (size_t)level);
+
+ return AWS_OP_SUCCESS;
}
struct aws_logger_vtable g_pipeline_logger_owned_vtable = {
.get_log_level = s_aws_logger_pipeline_get_log_level,
.log = s_aws_logger_pipeline_log,
.clean_up = s_aws_logger_pipeline_owned_clean_up,
+ .set_log_level = s_aws_logger_pipeline_set_log_level,
};
int aws_logger_init_standard(
@@ -181,7 +193,7 @@ int aws_logger_init_standard(
impl->channel = channel;
impl->writer = writer;
impl->allocator = allocator;
- impl->level = options->level;
+ aws_atomic_store_int(&impl->level, (size_t)options->level);
logger->vtable = &g_pipeline_logger_owned_vtable;
logger->allocator = allocator;
@@ -224,6 +236,7 @@ static struct aws_logger_vtable s_pipeline_logger_unowned_vtable = {
.get_log_level = s_aws_logger_pipeline_get_log_level,
.log = s_aws_logger_pipeline_log,
.clean_up = s_aws_pipeline_logger_unowned_clean_up,
+ .set_log_level = s_aws_logger_pipeline_set_log_level,
};
int aws_logger_init_from_external(
@@ -244,7 +257,7 @@ int aws_logger_init_from_external(
impl->channel = channel;
impl->writer = writer;
impl->allocator = allocator;
- impl->level = level;
+ aws_atomic_store_int(&impl->level, (size_t)level);
logger->vtable = &s_pipeline_logger_unowned_vtable;
logger->allocator = allocator;
@@ -368,6 +381,17 @@ void aws_register_log_subject_info_list(struct aws_log_subject_info_list *log_su
const uint32_t min_range = log_subject_list->subject_list[0].subject_id;
const uint32_t slot_index = min_range >> AWS_LOG_SUBJECT_STRIDE_BITS;
+#if DEBUG_BUILD
+ for (uint32_t i = 0; i < log_subject_list->count; ++i) {
+ const struct aws_log_subject_info *info = &log_subject_list->subject_list[i];
+ uint32_t expected_id = min_range + i;
+ if (expected_id != info->subject_id) {
+ fprintf(stderr, "\"%s\" is at wrong index in aws_log_subject_info[]\n", info->subject_name);
+ AWS_FATAL_ASSERT(0);
+ }
+ }
+#endif /* DEBUG_BUILD */
+
if (slot_index >= AWS_PACKAGE_SLOTS) {
/* This is an NDEBUG build apparently. Kill the process rather than
* corrupting heap. */
@@ -405,7 +429,7 @@ void aws_unregister_log_subject_info_list(struct aws_log_subject_info_list *log_
* no alloc implementation
*/
struct aws_logger_noalloc {
- enum aws_log_level level;
+ struct aws_atomic_var level;
FILE *file;
bool should_close;
struct aws_mutex lock;
@@ -415,7 +439,7 @@ static enum aws_log_level s_noalloc_stderr_logger_get_log_level(struct aws_logge
(void)subject;
struct aws_logger_noalloc *impl = logger->p_impl;
- return impl->level;
+ return (enum aws_log_level)aws_atomic_load_int(&impl->level);
}
#define MAXIMUM_NO_ALLOC_LOG_LINE_SIZE 8192
@@ -464,13 +488,15 @@ static int s_noalloc_stderr_logger_log(
aws_mutex_lock(&impl->lock);
+ int write_result = AWS_OP_SUCCESS;
if (fwrite(format_buffer, 1, format_data.amount_written, impl->file) < format_data.amount_written) {
- return aws_translate_and_raise_io_error(errno);
+ aws_translate_and_raise_io_error(errno);
+ write_result = AWS_OP_ERR;
}
aws_mutex_unlock(&impl->lock);
- return AWS_OP_SUCCESS;
+ return write_result;
}
static void s_noalloc_stderr_logger_clean_up(struct aws_logger *logger) {
@@ -489,10 +515,19 @@ static void s_noalloc_stderr_logger_clean_up(struct aws_logger *logger) {
AWS_ZERO_STRUCT(*logger);
}
+int s_no_alloc_stderr_logger_set_log_level(struct aws_logger *logger, enum aws_log_level level) {
+ struct aws_logger_noalloc *impl = logger->p_impl;
+
+ aws_atomic_store_int(&impl->level, (size_t)level);
+
+ return AWS_OP_SUCCESS;
+}
+
static struct aws_logger_vtable s_noalloc_stderr_vtable = {
.get_log_level = s_noalloc_stderr_logger_get_log_level,
.log = s_noalloc_stderr_logger_log,
.clean_up = s_noalloc_stderr_logger_clean_up,
+ .set_log_level = s_no_alloc_stderr_logger_set_log_level,
};
int aws_logger_init_noalloc(
@@ -506,12 +541,13 @@ int aws_logger_init_noalloc(
return AWS_OP_ERR;
}
- impl->level = options->level;
+ aws_atomic_store_int(&impl->level, (size_t)options->level);
+
if (options->file != NULL) {
impl->file = options->file;
impl->should_close = false;
} else {
- impl->file = fopen(options->filename, "w");
+ impl->file = aws_fopen(options->filename, "w");
impl->should_close = true;
}
@@ -523,3 +559,15 @@ int aws_logger_init_noalloc(
return AWS_OP_SUCCESS;
}
+
+int aws_logger_set_log_level(struct aws_logger *logger, enum aws_log_level level) {
+ if (logger == NULL || logger->vtable == NULL) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (logger->vtable->set_log_level == NULL) {
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ }
+
+ return logger->vtable->set_log_level(logger, level);
+}
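
The level fields become atomics above so the new aws_logger_set_log_level() entry point can flip verbosity at runtime without racing the logging hot path. A minimal usage sketch (assumes a logger initialized elsewhere; the helper name is illustrative, not part of the patch):

    #include <aws/common/logging.h>

    static void s_quiet_down(struct aws_logger *logger) {
        /* fails with AWS_ERROR_UNIMPLEMENTED if this logger's vtable lacks set_log_level */
        if (aws_logger_set_log_level(logger, AWS_LL_WARN) != AWS_OP_SUCCESS) {
            /* keep whatever level was configured at init time */
        }
    }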
diff --git a/contrib/restricted/aws/aws-c-common/source/memtrace.c b/contrib/restricted/aws/aws-c-common/source/memtrace.c
index 9b776211f9..7362e07a30 100644
--- a/contrib/restricted/aws/aws-c-common/source/memtrace.c
+++ b/contrib/restricted/aws/aws-c-common/source/memtrace.c
@@ -224,7 +224,7 @@ static int s_collect_stack_trace(void *context, struct aws_hash_element *item) {
struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str(caller);
aws_byte_buf_append(&stacktrace, &cursor);
}
- free(symbols);
+ aws_mem_release(aws_default_allocator(), symbols);
/* record the resultant buffer as a string */
stack_info->trace = aws_string_new_from_array(aws_default_allocator(), stacktrace.buffer, stacktrace.len);
AWS_FATAL_ASSERT(stack_info->trace);
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/file.c b/contrib/restricted/aws/aws-c-common/source/posix/file.c
new file mode 100644
index 0000000000..7c26ade8c3
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/posix/file.c
@@ -0,0 +1,279 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/environment.h>
+#include <aws/common/file.h>
+#include <aws/common/string.h>
+#include <dirent.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+FILE *aws_fopen_safe(const struct aws_string *file_path, const struct aws_string *mode) {
+ return fopen(aws_string_c_str(file_path), aws_string_c_str(mode));
+}
+
+static int s_parse_and_raise_error(int errno_cpy) {
+ if (errno_cpy == 0) {
+ return AWS_OP_SUCCESS;
+ }
+
+ if (errno_cpy == ENOENT || errno_cpy == ENOTDIR) {
+ return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH);
+ }
+
+ if (errno_cpy == EMFILE || errno_cpy == ENFILE) {
+ return aws_raise_error(AWS_ERROR_MAX_FDS_EXCEEDED);
+ }
+
+ if (errno_cpy == EACCES) {
+ return aws_raise_error(AWS_ERROR_NO_PERMISSION);
+ }
+
+ if (errno_cpy == ENOTEMPTY) {
+ return aws_raise_error(AWS_ERROR_DIRECTORY_NOT_EMPTY);
+ }
+
+ return aws_raise_error(AWS_ERROR_UNKNOWN);
+}
+
+int aws_directory_create(const struct aws_string *dir_path) {
+ int mkdir_ret = mkdir(aws_string_c_str(dir_path), S_IRWXU | S_IRWXG | S_IRWXO);
+
+ /* nobody cares if it already existed. */
+ if (mkdir_ret != 0 && errno != EEXIST) {
+ return s_parse_and_raise_error(errno);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+bool aws_directory_exists(const struct aws_string *dir_path) {
+ struct stat dir_info;
+ if (lstat(aws_string_c_str(dir_path), &dir_info) == 0 && S_ISDIR(dir_info.st_mode)) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool s_delete_file_or_directory(const struct aws_directory_entry *entry, void *user_data) {
+ (void)user_data;
+
+ struct aws_allocator *allocator = aws_default_allocator();
+
+ struct aws_string *path_str = aws_string_new_from_cursor(allocator, &entry->relative_path);
+ int ret_val = AWS_OP_SUCCESS;
+
+ if (entry->file_type & AWS_FILE_TYPE_FILE) {
+ ret_val = aws_file_delete(path_str);
+ }
+
+ if (entry->file_type & AWS_FILE_TYPE_DIRECTORY) {
+ ret_val = aws_directory_delete(path_str, false);
+ }
+
+ aws_string_destroy(path_str);
+ return ret_val == AWS_OP_SUCCESS;
+}
+
+int aws_directory_delete(const struct aws_string *dir_path, bool recursive) {
+ if (!aws_directory_exists(dir_path)) {
+ return AWS_OP_SUCCESS;
+ }
+
+ int ret_val = AWS_OP_SUCCESS;
+
+ if (recursive) {
+ ret_val = aws_directory_traverse(aws_default_allocator(), dir_path, true, s_delete_file_or_directory, NULL);
+ }
+
+ if (ret_val && aws_last_error() == AWS_ERROR_FILE_INVALID_PATH) {
+ aws_reset_error();
+ return AWS_OP_SUCCESS;
+ }
+
+ if (ret_val) {
+ return AWS_OP_ERR;
+ }
+
+ int error_code = rmdir(aws_string_c_str(dir_path));
+
+ return error_code == 0 ? AWS_OP_SUCCESS : s_parse_and_raise_error(errno);
+}
+
+int aws_directory_or_file_move(const struct aws_string *from, const struct aws_string *to) {
+ int error_code = rename(aws_string_c_str(from), aws_string_c_str(to));
+
+ return error_code == 0 ? AWS_OP_SUCCESS : s_parse_and_raise_error(errno);
+}
+
+int aws_file_delete(const struct aws_string *file_path) {
+ int error_code = unlink(aws_string_c_str(file_path));
+
+ if (!error_code || errno == ENOENT) {
+ return AWS_OP_SUCCESS;
+ }
+
+ return s_parse_and_raise_error(errno);
+}
+
+int aws_directory_traverse(
+ struct aws_allocator *allocator,
+ const struct aws_string *path,
+ bool recursive,
+ aws_on_directory_entry *on_entry,
+ void *user_data) {
+ DIR *dir = opendir(aws_string_c_str(path));
+
+ if (!dir) {
+ return s_parse_and_raise_error(errno);
+ }
+
+ struct aws_byte_cursor current_path = aws_byte_cursor_from_string(path);
+ if (current_path.ptr[current_path.len - 1] == AWS_PATH_DELIM) {
+ current_path.len -= 1;
+ }
+
+ struct dirent *dirent = NULL;
+ int ret_val = AWS_ERROR_SUCCESS;
+
+ errno = 0;
+ while (!ret_val && (dirent = readdir(dir)) != NULL) {
+ /* note: dirent->name_len is only defined on the BSDs, not on Linux, and it's not part of the
+ * required POSIX spec, so we use dirent->d_name as a C string here. */
+ struct aws_byte_cursor name_component = aws_byte_cursor_from_c_str(dirent->d_name);
+
+ if (aws_byte_cursor_eq_c_str(&name_component, "..") || aws_byte_cursor_eq_c_str(&name_component, ".")) {
+ continue;
+ }
+
+ struct aws_byte_buf relative_path;
+ aws_byte_buf_init_copy_from_cursor(&relative_path, allocator, current_path);
+ aws_byte_buf_append_byte_dynamic(&relative_path, AWS_PATH_DELIM);
+ aws_byte_buf_append_dynamic(&relative_path, &name_component);
+ aws_byte_buf_append_byte_dynamic(&relative_path, 0);
+ relative_path.len -= 1;
+
+ struct aws_directory_entry entry;
+ AWS_ZERO_STRUCT(entry);
+
+ struct stat dir_info;
+ if (!lstat((const char *)relative_path.buffer, &dir_info)) {
+ if (S_ISDIR(dir_info.st_mode)) {
+ entry.file_type |= AWS_FILE_TYPE_DIRECTORY;
+ }
+ if (S_ISLNK(dir_info.st_mode)) {
+ entry.file_type |= AWS_FILE_TYPE_SYM_LINK;
+ }
+ if (S_ISREG(dir_info.st_mode)) {
+ entry.file_type |= AWS_FILE_TYPE_FILE;
+ entry.file_size = dir_info.st_size;
+ }
+
+ if (!entry.file_type) {
+ AWS_ASSERT("Unknown file type encountered");
+ }
+
+ entry.relative_path = aws_byte_cursor_from_buf(&relative_path);
+ const char *full_path = realpath((const char *)relative_path.buffer, NULL);
+
+ if (full_path) {
+ entry.path = aws_byte_cursor_from_c_str(full_path);
+ }
+
+ if (recursive && entry.file_type & AWS_FILE_TYPE_DIRECTORY) {
+ struct aws_string *rel_path_str = aws_string_new_from_cursor(allocator, &entry.relative_path);
+ ret_val = aws_directory_traverse(allocator, rel_path_str, recursive, on_entry, user_data);
+ aws_string_destroy(rel_path_str);
+ }
+
+ /* post order traversal, if a node below us ended the traversal, don't call the visitor again. */
+ if (ret_val && aws_last_error() == AWS_ERROR_OPERATION_INTERUPTED) {
+ goto cleanup;
+ }
+
+ if (!on_entry(&entry, user_data)) {
+ ret_val = aws_raise_error(AWS_ERROR_OPERATION_INTERUPTED);
+ goto cleanup;
+ }
+
+ if (ret_val) {
+ goto cleanup;
+ }
+
+ cleanup:
+ /* per https://man7.org/linux/man-pages/man3/realpath.3.html, the buffer realpath() returns
+ * must be freed when NULL was passed as the second argument. */
+ if (full_path) {
+ free((void *)full_path);
+ }
+ aws_byte_buf_clean_up(&relative_path);
+ }
+ }
+
+ closedir(dir);
+ return ret_val;
+}
+
+char aws_get_platform_directory_separator(void) {
+ return '/';
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_home_env_var, "HOME");
+
+struct aws_string *aws_get_home_directory(struct aws_allocator *allocator) {
+
+ /* ToDo: check getpwuid_r if environment check fails */
+ struct aws_string *home_env_var_value = NULL;
+ if (aws_get_environment_value(allocator, s_home_env_var, &home_env_var_value) == 0 && home_env_var_value != NULL) {
+ return home_env_var_value;
+ }
+
+ return NULL;
+}
+
+bool aws_path_exists(const struct aws_string *path) {
+ struct stat buffer;
+ return stat(aws_string_c_str(path), &buffer) == 0;
+}
+
+int aws_fseek(FILE *file, int64_t offset, int whence) {
+
+#ifdef AWS_HAVE_POSIX_LARGE_FILE_SUPPORT
+ int result = fseeko(file, offset, whence);
+#else
+ /* must use fseek(), which takes offset as a long */
+ if (offset < LONG_MIN || offset > LONG_MAX) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ int result = fseek(file, offset, whence);
+#endif /* AWS_HAVE_POSIX_LARGE_FILE_SUPPORT */
+
+ if (result != 0) {
+ return aws_translate_and_raise_io_error(errno);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_file_get_length(FILE *file, int64_t *length) {
+
+ struct stat file_stats;
+
+ int fd = fileno(file);
+ if (fd == -1) {
+ return aws_raise_error(AWS_ERROR_INVALID_FILE_HANDLE);
+ }
+
+ if (fstat(fd, &file_stats)) {
+ return aws_translate_and_raise_io_error(errno);
+ }
+
+ *length = file_stats.st_size;
+
+ return AWS_OP_SUCCESS;
+}
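
The traversal API above drives a visitor over a directory tree, descending before visiting when recursive. A minimal sketch built only from functions defined in this file (the counting helper is illustrative, not part of the patch):

    #include <aws/common/file.h>

    static bool s_count_regular_files(const struct aws_directory_entry *entry, void *user_data) {
        size_t *count = user_data;
        if (entry->file_type & AWS_FILE_TYPE_FILE) {
            ++(*count);
        }
        return true; /* returning false stops traversal with AWS_ERROR_OPERATION_INTERUPTED */
    }

    static int s_count_tree(struct aws_allocator *allocator, const struct aws_string *path, size_t *out_count) {
        *out_count = 0;
        return aws_directory_traverse(allocator, path, true /* recursive */, s_count_regular_files, out_count);
    }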
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/system_info.c b/contrib/restricted/aws/aws-c-common/source/posix/system_info.c
index 1311be4096..e841243fb1 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/system_info.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/system_info.c
@@ -8,11 +8,17 @@
#include <aws/common/byte_buf.h>
#include <aws/common/logging.h>
#include <aws/common/platform.h>
+#include <aws/common/private/dlloads.h>
#if defined(__FreeBSD__) || defined(__NetBSD__)
# define __BSD_VISIBLE 1
#endif
+#if defined(__linux__) || defined(__unix__)
+# include <sys/sysinfo.h>
+# include <sys/types.h>
+#endif
+
#include <unistd.h>
#if defined(HAVE_SYSCONF)
@@ -39,6 +45,74 @@ size_t aws_system_info_processor_count(void) {
#include <ctype.h>
#include <fcntl.h>
+uint16_t aws_get_cpu_group_count(void) {
+ if (g_numa_num_configured_nodes_ptr) {
+ return (uint16_t)g_numa_num_configured_nodes_ptr();
+ }
+
+ return 1u;
+}
+
+size_t aws_get_cpu_count_for_group(uint16_t group_idx) {
+ if (g_numa_node_of_cpu_ptr) {
+ size_t total_cpus = aws_system_info_processor_count();
+
+ uint16_t cpu_count = 0;
+ for (size_t i = 0; i < total_cpus; ++i) {
+ if (group_idx == g_numa_node_of_cpu_ptr((int)i)) {
+ cpu_count++;
+ }
+ }
+ return cpu_count;
+ }
+
+ return aws_system_info_processor_count();
+}
+
+void aws_get_cpu_ids_for_group(uint16_t group_idx, struct aws_cpu_info *cpu_ids_array, size_t cpu_ids_array_length) {
+ AWS_PRECONDITION(cpu_ids_array);
+
+ if (!cpu_ids_array_length) {
+ return;
+ }
+
+ /* go ahead and initialize everything. */
+ for (size_t i = 0; i < cpu_ids_array_length; ++i) {
+ cpu_ids_array[i].cpu_id = -1;
+ cpu_ids_array[i].suspected_hyper_thread = false;
+ }
+
+ if (g_numa_node_of_cpu_ptr) {
+ size_t total_cpus = aws_system_info_processor_count();
+ size_t current_array_idx = 0;
+ for (size_t i = 0; i < total_cpus && current_array_idx < cpu_ids_array_length; ++i) {
+ if ((int)group_idx == g_numa_node_of_cpu_ptr((int)i)) {
+ cpu_ids_array[current_array_idx].cpu_id = (int32_t)i;
+
+ /* looking for an index jump is a more reliable way to find these. If they're in the group and then
+ * the index jumps, say from 17 to 36, we're most likely in hyper-thread land. Also, inside a node,
+ * once we find the first hyper-thread, the remaining cores are also likely hyper-threads. */
+ if (current_array_idx > 0 && (cpu_ids_array[current_array_idx - 1].suspected_hyper_thread ||
+ cpu_ids_array[current_array_idx - 1].cpu_id < ((int)i - 1))) {
+ cpu_ids_array[current_array_idx].suspected_hyper_thread = true;
+ }
+ current_array_idx += 1;
+ }
+ }
+
+ return;
+ }
+
+ /* a crude hint: hyper-threads are numbered as the second half of the cpu id listing. The assumption,
+ * if you hit this path, is that the array is just listing every cpu on the system. */
+ size_t hyper_thread_hint = cpu_ids_array_length / 2 - 1;
+
+ for (size_t i = 0; i < cpu_ids_array_length; ++i) {
+ cpu_ids_array[i].cpu_id = (int32_t)i;
+ cpu_ids_array[i].suspected_hyper_thread = i > hyper_thread_hint;
+ }
+}
+
bool aws_is_debugger_present(void) {
/* Open the status file */
const int status_fd = open("/proc/self/status", O_RDONLY);
@@ -124,7 +198,7 @@ char *s_whitelist_chars(char *path) {
# include <dlfcn.h>
# include <mach-o/dyld.h>
static char s_exe_path[PATH_MAX];
-const char *s_get_executable_path() {
+static const char *s_get_executable_path(void) {
static const char *s_exe = NULL;
if (AWS_LIKELY(s_exe)) {
return s_exe;
@@ -312,43 +386,7 @@ void aws_backtrace_print(FILE *fp, void *call_site_data) {
}
fprintf(fp, "################################################################################\n");
- fprintf(fp, "Resolved stacktrace:\n");
- fprintf(fp, "################################################################################\n");
- /* symbols look like: <exe-or-shared-lib>(<function>+<addr>) [0x<addr>]
- * or: <exe-or-shared-lib> [0x<addr>]
- * or: [0x<addr>]
- * start at 1 to skip the current frame (this function) */
- for (size_t frame_idx = 1; frame_idx < stack_depth; ++frame_idx) {
- struct aws_stack_frame_info frame;
- AWS_ZERO_STRUCT(frame);
- const char *symbol = symbols[frame_idx];
- if (s_parse_symbol(symbol, stack_frames[frame_idx], &frame)) {
- goto parse_failed;
- }
-
- /* TODO: Emulate libunwind */
- char cmd[sizeof(struct aws_stack_frame_info)] = {0};
- s_resolve_cmd(cmd, sizeof(cmd), &frame);
- FILE *out = popen(cmd, "r");
- if (!out) {
- goto parse_failed;
- }
- char output[1024];
- if (fgets(output, sizeof(output), out)) {
- /* if addr2line or atos don't know what to do with an address, they just echo it */
- /* if there are spaces in the output, then they resolved something */
- if (strstr(output, " ")) {
- symbol = output;
- }
- }
- pclose(out);
-
- parse_failed:
- fprintf(fp, "%s%s", symbol, (symbol == symbols[frame_idx]) ? "\n" : "");
- }
-
- fprintf(fp, "################################################################################\n");
- fprintf(fp, "Raw stacktrace:\n");
+ fprintf(fp, "Stack trace:\n");
fprintf(fp, "################################################################################\n");
for (size_t frame_idx = 1; frame_idx < stack_depth; ++frame_idx) {
const char *symbol = symbols[frame_idx];
@@ -359,6 +397,21 @@ void aws_backtrace_print(FILE *fp, void *call_site_data) {
free(symbols);
}
+void aws_backtrace_log(int log_level) {
+ void *stack_frames[AWS_BACKTRACE_DEPTH];
+ size_t num_frames = aws_backtrace(stack_frames, AWS_BACKTRACE_DEPTH);
+ if (!num_frames) {
+ AWS_LOGF(log_level, AWS_LS_COMMON_GENERAL, "Unable to capture backtrace");
+ return;
+ }
+ char **symbols = aws_backtrace_symbols(stack_frames, num_frames);
+ for (size_t line = 0; line < num_frames; ++line) {
+ const char *symbol = symbols[line];
+ AWS_LOGF(log_level, AWS_LS_COMMON_GENERAL, "%s", symbol);
+ }
+ free(symbols);
+}
+
#else
void aws_backtrace_print(FILE *fp, void *call_site_data) {
(void)call_site_data;
@@ -382,21 +435,11 @@ char **aws_backtrace_addr2line(void *const *stack_frames, size_t stack_depth) {
(void)stack_depth;
return NULL;
}
-#endif /* AWS_HAVE_EXECINFO */
-void aws_backtrace_log() {
- void *stack_frames[1024];
- size_t num_frames = aws_backtrace(stack_frames, 1024);
- if (!num_frames) {
- return;
- }
- char **symbols = aws_backtrace_addr2line(stack_frames, num_frames);
- for (size_t line = 0; line < num_frames; ++line) {
- const char *symbol = symbols[line];
- AWS_LOGF_TRACE(AWS_LS_COMMON_GENERAL, "%s", symbol);
- }
- free(symbols);
+void aws_backtrace_log(int log_level) {
+ AWS_LOGF(log_level, AWS_LS_COMMON_GENERAL, "aws_backtrace_log: no execinfo compatible backtrace API available");
}
+#endif /* AWS_HAVE_EXECINFO */
#if defined(AWS_OS_APPLE)
enum aws_platform_os aws_get_platform_build_os(void) {
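
The new NUMA helpers above degrade gracefully when the libnuma symbols are not loaded (one group, flat CPU listing). A sketch of enumerating groups and their CPUs (helper name and printing are illustrative; error handling abridged):

    #include <aws/common/allocator.h>
    #include <aws/common/system_info.h>
    #include <stdio.h>

    static void s_dump_cpu_topology(struct aws_allocator *allocator) {
        uint16_t group_count = aws_get_cpu_group_count();
        for (uint16_t group = 0; group < group_count; ++group) {
            size_t cpu_count = aws_get_cpu_count_for_group(group);
            struct aws_cpu_info *cpus = aws_mem_calloc(allocator, cpu_count, sizeof(struct aws_cpu_info));
            if (!cpus) {
                return;
            }
            aws_get_cpu_ids_for_group(group, cpus, cpu_count);
            for (size_t i = 0; i < cpu_count; ++i) {
                printf("group %u, cpu %d%s\n", (unsigned)group, (int)cpus[i].cpu_id,
                    cpus[i].suspected_hyper_thread ? " (suspected hyper-thread)" : "");
            }
            aws_mem_release(allocator, cpus);
        }
    }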
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/thread.c b/contrib/restricted/aws/aws-c-common/source/posix/thread.c
index 064d16882f..4f742afe02 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/thread.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/thread.c
@@ -8,8 +8,10 @@
#endif
#include <aws/common/clock.h>
+#include <aws/common/linked_list.h>
#include <aws/common/logging.h>
#include <aws/common/private/dlloads.h>
+#include <aws/common/private/thread_shared.h>
#include <aws/common/thread.h>
#include <dlfcn.h>
@@ -25,10 +27,29 @@
typedef cpuset_t cpu_set_t;
#endif
+#if !defined(AWS_AFFINITY_METHOD)
+# error "Must provide a method for setting thread affinity"
+#endif
+
+// Possible methods for setting thread affinity
+#define AWS_AFFINITY_METHOD_NONE 0
+#define AWS_AFFINITY_METHOD_PTHREAD_ATTR 1
+#define AWS_AFFINITY_METHOD_PTHREAD 2
+
+// Ensure provided affinity method matches one of the supported values
+// clang-format off
+#if AWS_AFFINITY_METHOD != AWS_AFFINITY_METHOD_NONE \
+ && AWS_AFFINITY_METHOD != AWS_AFFINITY_METHOD_PTHREAD_ATTR \
+ && AWS_AFFINITY_METHOD != AWS_AFFINITY_METHOD_PTHREAD
+// clang-format on
+# error "Invalid thread affinity method"
+#endif
+
static struct aws_thread_options s_default_options = {
/* this will make sure platform default stack size is used. */
.stack_size = 0,
.cpu_id = -1,
+ .join_strategy = AWS_TJS_MANUAL,
};
struct thread_atexit_callback {
@@ -39,44 +60,92 @@ struct thread_atexit_callback {
struct thread_wrapper {
struct aws_allocator *allocator;
+ struct aws_linked_list_node node;
void (*func)(void *arg);
void *arg;
struct thread_atexit_callback *atexit;
void (*call_once)(void *);
void *once_arg;
- struct aws_thread *thread;
+
+ /*
+ * The managed thread system does lazy joins on threads once finished via their wrapper. For that to work
+ * we need something to join against, so we keep a by-value copy of the original thread here. The tricky part
+ * is how to set the threadid/handle of this copy since the copy must be injected into the thread function before
+ * the threadid/handle is known. We get around that by just querying it at the top of the wrapper thread function.
+ */
+ struct aws_thread thread_copy;
bool membind;
};
static AWS_THREAD_LOCAL struct thread_wrapper *tl_wrapper = NULL;
+/*
+ * thread_wrapper is platform-dependent so this function ends up being duplicated in each thread implementation
+ */
+void aws_thread_join_and_free_wrapper_list(struct aws_linked_list *wrapper_list) {
+ struct aws_linked_list_node *iter = aws_linked_list_begin(wrapper_list);
+ while (iter != aws_linked_list_end(wrapper_list)) {
+
+ struct thread_wrapper *join_thread_wrapper = AWS_CONTAINER_OF(iter, struct thread_wrapper, node);
+
+ /*
+ * Can't do a for-loop since we need to advance to the next wrapper before we free the wrapper
+ */
+ iter = aws_linked_list_next(iter);
+
+ join_thread_wrapper->thread_copy.detach_state = AWS_THREAD_JOINABLE;
+ aws_thread_join(&join_thread_wrapper->thread_copy);
+
+ /*
+ * This doesn't actually do anything when using posix threads, but it keeps us
+ * in sync with the Windows version as well as the lifecycle contract we're
+ * presenting for threads.
+ */
+ aws_thread_clean_up(&join_thread_wrapper->thread_copy);
+
+ aws_mem_release(join_thread_wrapper->allocator, join_thread_wrapper);
+
+ aws_thread_decrement_unjoined_count();
+ }
+}
+
static void *thread_fn(void *arg) {
- struct thread_wrapper wrapper = *(struct thread_wrapper *)arg;
+ struct thread_wrapper *wrapper_ptr = arg;
+
+ /*
+ * Make sure the aws_thread copy has the right thread id stored in it.
+ */
+ wrapper_ptr->thread_copy.thread_id = aws_thread_current_thread_id();
+
+ struct thread_wrapper wrapper = *wrapper_ptr;
struct aws_allocator *allocator = wrapper.allocator;
tl_wrapper = &wrapper;
+
if (wrapper.membind && g_set_mempolicy_ptr) {
AWS_LOGF_INFO(
AWS_LS_COMMON_THREAD,
- "id=%p: a cpu affinity was specified when launching this thread and set_mempolicy() is available on this "
- "system. Setting the memory policy to MPOL_PREFERRED",
- (void *)tl_wrapper->thread);
+ "a cpu affinity was specified when launching this thread and set_mempolicy() is available on this "
+ "system. Setting the memory policy to MPOL_PREFERRED");
/* if a user set a cpu id in their thread options, we're going to make sure the numa policy honors that
* and makes sure the numa node of the cpu we launched this thread on is where memory gets allocated. However,
* we don't want to fail the application if this fails, so make the call, and ignore the result. */
long resp = g_set_mempolicy_ptr(AWS_MPOL_PREFERRED_ALIAS, NULL, 0);
if (resp) {
- AWS_LOGF_WARN(
- AWS_LS_COMMON_THREAD,
- "id=%p: call to set_mempolicy() failed with errno %d",
- (void *)wrapper.thread,
- errno);
+ AWS_LOGF_WARN(AWS_LS_COMMON_THREAD, "call to set_mempolicy() failed with errno %d", errno);
}
}
wrapper.func(wrapper.arg);
- struct thread_atexit_callback *exit_callback_data = wrapper.atexit;
- aws_mem_release(allocator, arg);
+ /*
+ * Managed threads don't free the wrapper yet. The thread management system does it later after the thread
+ * is joined.
+ */
+ bool is_managed_thread = wrapper.thread_copy.detach_state == AWS_THREAD_MANAGED;
+ if (!is_managed_thread) {
+ aws_mem_release(allocator, arg);
+ }
+ struct thread_atexit_callback *exit_callback_data = wrapper.atexit;
while (exit_callback_data) {
aws_thread_atexit_fn *exit_callback = exit_callback_data->callback;
void *exit_callback_user_data = exit_callback_data->user_data;
@@ -89,6 +158,13 @@ static void *thread_fn(void *arg) {
}
tl_wrapper = NULL;
+ /*
+ * Release this thread to the managed thread system for lazy join.
+ */
+ if (is_managed_thread) {
+ aws_thread_pending_join_add(&wrapper_ptr->node);
+ }
+
return NULL;
}
@@ -138,6 +214,10 @@ int aws_thread_launch(
pthread_attr_t *attributes_ptr = NULL;
int attr_return = 0;
int allocation_failed = 0;
+ bool is_managed_thread = options != NULL && options->join_strategy == AWS_TJS_MANAGED;
+ if (is_managed_thread) {
+ thread->detach_state = AWS_THREAD_MANAGED;
+ }
if (options) {
attr_return = pthread_attr_init(&attributes);
@@ -160,7 +240,7 @@ int aws_thread_launch(
* NUMA or not is set up in interleave mode.
* Thread affinity is also not supported on Android systems, and honestly, if you're running Android on a NUMA
* configuration, you've got bigger problems. */
-#if !defined(__MACH__) && !defined(__ANDROID__) && !defined(_musl_)
+#if AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD_ATTR
if (options->cpu_id >= 0) {
AWS_LOGF_INFO(
AWS_LS_COMMON_THREAD,
@@ -183,7 +263,7 @@ int aws_thread_launch(
goto cleanup;
}
}
-#endif /* !defined(__MACH__) && !defined(__ANDROID__) */
+#endif /* AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD_ATTR */
}
struct thread_wrapper *wrapper =
@@ -198,17 +278,58 @@ int aws_thread_launch(
wrapper->membind = true;
}
- wrapper->thread = thread;
+ wrapper->thread_copy = *thread;
wrapper->allocator = thread->allocator;
wrapper->func = func;
wrapper->arg = arg;
+
+ /*
+ * Increment the count prior to spawning the thread. Decrement back if the create failed.
+ */
+ if (is_managed_thread) {
+ aws_thread_increment_unjoined_count();
+ }
+
attr_return = pthread_create(&thread->thread_id, attributes_ptr, thread_fn, (void *)wrapper);
if (attr_return) {
+ if (is_managed_thread) {
+ aws_thread_decrement_unjoined_count();
+ }
goto cleanup;
}
- thread->detach_state = AWS_THREAD_JOINABLE;
+#if AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD
+ /* If we don't have pthread_attr_setaffinity_np, we may
+ * still be able to set the thread affinity after creation. */
+ if (options && options->cpu_id >= 0) {
+ AWS_LOGF_INFO(
+ AWS_LS_COMMON_THREAD,
+ "id=%p: cpu affinity of cpu_id %d was specified, attempting to honor the value.",
+ (void *)thread,
+ options->cpu_id);
+
+ cpu_set_t cpuset;
+ CPU_ZERO(&cpuset);
+ CPU_SET((uint32_t)options->cpu_id, &cpuset);
+
+ attr_return = pthread_setaffinity_np(thread->thread_id, sizeof(cpuset), &cpuset);
+ if (attr_return) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_THREAD, "id=%p: pthread_setaffinity_np() failed with %d.", (void *)thread, errno);
+ goto cleanup;
+ }
+ }
+#endif /* AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD */
+ /*
+ * Managed threads need to stay unjoinable from an external perspective. We'll handle it after thread function
+ * completion.
+ */
+ if (is_managed_thread) {
+ aws_thread_clean_up(thread);
+ } else {
+ thread->detach_state = AWS_THREAD_JOINABLE;
+ }
cleanup:
if (attributes_ptr) {
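
With AWS_TJS_MANAGED, a launched thread is joined lazily by the shared management system instead of by the caller. A sketch of the intended call pattern (the thread function is a placeholder; assumes aws_default_thread_options() from the public thread header):

    #include <aws/common/thread.h>

    static void s_background_work(void *arg) {
        (void)arg; /* placeholder thread function */
    }

    static int s_launch_managed(struct aws_allocator *allocator) {
        struct aws_thread thread;
        if (aws_thread_init(&thread, allocator)) {
            return AWS_OP_ERR;
        }

        struct aws_thread_options options = *aws_default_thread_options();
        options.join_strategy = AWS_TJS_MANAGED;

        if (aws_thread_launch(&thread, s_background_work, NULL, &options)) {
            aws_thread_clean_up(&thread);
            return AWS_OP_ERR;
        }

        /* no aws_thread_join() here: the managed-thread system joins lazily,
         * and aws_thread_join_all_managed() flushes stragglers at shutdown */
        return AWS_OP_SUCCESS;
    }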
diff --git a/contrib/restricted/aws/aws-c-common/source/priority_queue.c b/contrib/restricted/aws/aws-c-common/source/priority_queue.c
index 14ff421d5f..f7d0f54e2d 100644
--- a/contrib/restricted/aws/aws-c-common/source/priority_queue.c
+++ b/contrib/restricted/aws/aws-c-common/source/priority_queue.c
@@ -100,7 +100,7 @@ static bool s_sift_up(struct aws_priority_queue *queue, size_t index) {
bool did_move = false;
- void *parent_item, *child_item;
+ void *parent_item = NULL, *child_item = NULL;
size_t parent = PARENT_OF(index);
while (index) {
/*
diff --git a/contrib/restricted/aws/aws-c-common/source/process_common.c b/contrib/restricted/aws/aws-c-common/source/process_common.c
index 9b734c46f8..ef432374b8 100644
--- a/contrib/restricted/aws/aws-c-common/source/process_common.c
+++ b/contrib/restricted/aws/aws-c-common/source/process_common.c
@@ -27,6 +27,18 @@ void aws_run_command_result_cleanup(struct aws_run_command_result *result) {
aws_string_destroy_secure(result->std_err);
}
+#if defined(AWS_OS_WINDOWS) && !defined(AWS_OS_WINDOWS_DESKTOP)
+int aws_run_command(
+ struct aws_allocator *allocator,
+ struct aws_run_command_options *options,
+ struct aws_run_command_result *result) {
+ (void)allocator;
+ (void)options;
+ (void)result;
+ return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION);
+}
+#else
+
int aws_run_command(
struct aws_allocator *allocator,
struct aws_run_command_options *options,
@@ -44,11 +56,11 @@ int aws_run_command(
goto on_finish;
}
-#ifdef _WIN32
+# if defined(AWS_OS_WINDOWS)
output_stream = _popen(options->command, "r");
-#else
+# else
output_stream = popen(options->command, "r");
-#endif
+# endif
if (output_stream) {
while (!feof(output_stream)) {
@@ -59,11 +71,11 @@ int aws_run_command(
}
}
}
-#ifdef _WIN32
+# if defined(AWS_OS_WINDOWS)
result->ret_code = _pclose(output_stream);
-#else
+# else
result->ret_code = pclose(output_stream);
-#endif
+# endif
}
struct aws_byte_cursor trim_cursor = aws_byte_cursor_from_buf(&result_buffer);
@@ -80,3 +92,4 @@ on_finish:
aws_byte_buf_clean_up_secure(&result_buffer);
return ret;
}
+#endif /* defined(AWS_OS_WINDOWS) && !defined(AWS_OS_WINDOWS_DESKTOP) */
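
On platforms where it is supported, aws_run_command() shells out and captures stdout. A usage sketch (command choice and helper name are illustrative; assumes aws_run_command_result_init() and aws_string_clone_or_reuse() from this library):

    #include <aws/common/process.h>
    #include <aws/common/string.h>

    static int s_capture_uname(struct aws_allocator *allocator, struct aws_string **out_stdout) {
        struct aws_run_command_options options = {.command = "uname -a"};

        struct aws_run_command_result result;
        if (aws_run_command_result_init(allocator, &result)) {
            return AWS_OP_ERR;
        }

        int rc = aws_run_command(allocator, &options, &result);
        if (rc == AWS_OP_SUCCESS && result.ret_code == 0 && result.std_out) {
            *out_stdout = aws_string_clone_or_reuse(allocator, result.std_out);
        }

        aws_run_command_result_cleanup(&result);
        return rc;
    }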
diff --git a/contrib/restricted/aws/aws-c-common/source/promise.c b/contrib/restricted/aws/aws-c-common/source/promise.c
new file mode 100644
index 0000000000..444623d625
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/promise.c
@@ -0,0 +1,115 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/condition_variable.h>
+#include <aws/common/mutex.h>
+#include <aws/common/promise.h>
+#include <aws/common/ref_count.h>
+
+struct aws_promise {
+ struct aws_allocator *allocator;
+ struct aws_mutex mutex;
+ struct aws_condition_variable cv;
+ struct aws_ref_count rc;
+ bool complete;
+ int error_code;
+ void *value;
+
+ /* destructor for value, will be invoked if the value is not taken */
+ void (*dtor)(void *);
+};
+
+static void s_aws_promise_dtor(void *ptr) {
+ struct aws_promise *promise = ptr;
+ aws_condition_variable_clean_up(&promise->cv);
+ aws_mutex_clean_up(&promise->mutex);
+ if (promise->value && promise->dtor) {
+ promise->dtor(promise->value);
+ }
+ aws_mem_release(promise->allocator, promise);
+}
+
+struct aws_promise *aws_promise_new(struct aws_allocator *allocator) {
+ struct aws_promise *promise = aws_mem_calloc(allocator, 1, sizeof(struct aws_promise));
+ promise->allocator = allocator;
+ aws_ref_count_init(&promise->rc, promise, s_aws_promise_dtor);
+ aws_mutex_init(&promise->mutex);
+ aws_condition_variable_init(&promise->cv);
+ return promise;
+}
+
+struct aws_promise *aws_promise_acquire(struct aws_promise *promise) {
+ aws_ref_count_acquire(&promise->rc);
+ return promise;
+}
+
+void aws_promise_release(struct aws_promise *promise) {
+ aws_ref_count_release(&promise->rc);
+}
+
+static bool s_promise_completed(void *user_data) {
+ struct aws_promise *promise = user_data;
+ return promise->complete;
+}
+
+void aws_promise_wait(struct aws_promise *promise) {
+ aws_mutex_lock(&promise->mutex);
+ aws_condition_variable_wait_pred(&promise->cv, &promise->mutex, s_promise_completed, promise);
+ aws_mutex_unlock(&promise->mutex);
+}
+
+bool aws_promise_wait_for(struct aws_promise *promise, size_t nanoseconds) {
+ aws_mutex_lock(&promise->mutex);
+ aws_condition_variable_wait_for_pred(
+ &promise->cv, &promise->mutex, (int64_t)nanoseconds, s_promise_completed, promise);
+ const bool complete = promise->complete;
+ aws_mutex_unlock(&promise->mutex);
+ return complete;
+}
+
+bool aws_promise_is_complete(struct aws_promise *promise) {
+ aws_mutex_lock(&promise->mutex);
+ const bool complete = promise->complete;
+ aws_mutex_unlock(&promise->mutex);
+ return complete;
+}
+
+void aws_promise_complete(struct aws_promise *promise, void *value, void (*dtor)(void *)) {
+ aws_mutex_lock(&promise->mutex);
+ AWS_FATAL_ASSERT(!promise->complete && "aws_promise_complete: cannot complete a promise more than once");
+ promise->value = value;
+ promise->dtor = dtor;
+ promise->complete = true;
+ aws_mutex_unlock(&promise->mutex);
+ aws_condition_variable_notify_all(&promise->cv);
+}
+
+void aws_promise_fail(struct aws_promise *promise, int error_code) {
+ AWS_FATAL_ASSERT(error_code != 0 && "aws_promise_fail: cannot fail a promise with a 0 error_code");
+ aws_mutex_lock(&promise->mutex);
+ AWS_FATAL_ASSERT(!promise->complete && "aws_promise_fail: cannot complete a promise more than once");
+ promise->error_code = error_code;
+ promise->complete = true;
+ aws_mutex_unlock(&promise->mutex);
+ aws_condition_variable_notify_all(&promise->cv);
+}
+
+int aws_promise_error_code(struct aws_promise *promise) {
+ AWS_FATAL_ASSERT(aws_promise_is_complete(promise));
+ return promise->error_code;
+}
+
+void *aws_promise_value(struct aws_promise *promise) {
+ AWS_FATAL_ASSERT(aws_promise_is_complete(promise));
+ return promise->value;
+}
+
+void *aws_promise_take_value(struct aws_promise *promise) {
+ AWS_FATAL_ASSERT(aws_promise_is_complete(promise));
+ void *value = promise->value;
+ promise->value = NULL;
+ promise->dtor = NULL;
+ return value;
+}
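
The promise above is a one-shot, thread-safe rendezvous: exactly one completion, any number of waiters, and the value destructor runs only if nobody takes the value. A producer/consumer sketch (helper names are illustrative, not part of the patch):

    #include <aws/common/allocator.h>
    #include <aws/common/promise.h>

    static void s_free_int(void *value) {
        aws_mem_release(aws_default_allocator(), value);
    }

    /* producer side, typically running on another thread */
    static void s_produce(struct aws_promise *promise) {
        int *result = aws_mem_calloc(aws_default_allocator(), 1, sizeof(int));
        *result = 42;
        aws_promise_complete(promise, result, s_free_int); /* dtor runs only if the value is never taken */
    }

    /* consumer side */
    static int s_consume(struct aws_promise *promise) {
        int out = 0;
        aws_promise_wait(promise);
        if (aws_promise_error_code(promise) == 0) {
            int *value = aws_promise_take_value(promise); /* ownership transfers to the caller */
            out = *value;
            s_free_int(value);
        }
        aws_promise_release(promise);
        return out;
    }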
diff --git a/contrib/restricted/aws/aws-c-common/source/ref_count.c b/contrib/restricted/aws/aws-c-common/source/ref_count.c
index a1d938b022..1e90e4c8a1 100644
--- a/contrib/restricted/aws/aws-c-common/source/ref_count.c
+++ b/contrib/restricted/aws/aws-c-common/source/ref_count.c
@@ -15,7 +15,9 @@ void aws_ref_count_init(struct aws_ref_count *ref_count, void *object, aws_simpl
}
void *aws_ref_count_acquire(struct aws_ref_count *ref_count) {
- aws_atomic_fetch_add(&ref_count->ref_count, 1);
+ size_t old_value = aws_atomic_fetch_add(&ref_count->ref_count, 1);
+ AWS_ASSERT(old_value > 0 && "refcount has been zero, it's invalid to use it again.");
+ (void)old_value;
return ref_count->object;
}
@@ -29,52 +31,3 @@ size_t aws_ref_count_release(struct aws_ref_count *ref_count) {
return old_value - 1;
}
-
-static struct aws_condition_variable s_global_thread_signal = AWS_CONDITION_VARIABLE_INIT;
-static struct aws_mutex s_global_thread_lock = AWS_MUTEX_INIT;
-static uint32_t s_global_thread_count = 0;
-
-void aws_global_thread_creator_increment(void) {
- aws_mutex_lock(&s_global_thread_lock);
- ++s_global_thread_count;
- aws_mutex_unlock(&s_global_thread_lock);
-}
-
-void aws_global_thread_creator_decrement(void) {
- bool signal = false;
- aws_mutex_lock(&s_global_thread_lock);
- AWS_ASSERT(s_global_thread_count != 0 && "global tracker has gone negative");
- --s_global_thread_count;
- if (s_global_thread_count == 0) {
- signal = true;
- }
- aws_mutex_unlock(&s_global_thread_lock);
-
- if (signal) {
- aws_condition_variable_notify_all(&s_global_thread_signal);
- }
-}
-
-static bool s_thread_count_zero_pred(void *user_data) {
- (void)user_data;
-
- return s_global_thread_count == 0;
-}
-
-void aws_global_thread_creator_shutdown_wait(void) {
- aws_mutex_lock(&s_global_thread_lock);
- aws_condition_variable_wait_pred(&s_global_thread_signal, &s_global_thread_lock, s_thread_count_zero_pred, NULL);
- aws_mutex_unlock(&s_global_thread_lock);
-}
-
-int aws_global_thread_creator_shutdown_wait_for(uint32_t wait_timeout_in_seconds) {
- int64_t wait_time_in_nanos =
- aws_timestamp_convert(wait_timeout_in_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL);
-
- aws_mutex_lock(&s_global_thread_lock);
- int result = aws_condition_variable_wait_for_pred(
- &s_global_thread_signal, &s_global_thread_lock, wait_time_in_nanos, s_thread_count_zero_pred, NULL);
- aws_mutex_unlock(&s_global_thread_lock);
-
- return result;
-}
diff --git a/contrib/restricted/aws/aws-c-common/source/resource_name.c b/contrib/restricted/aws/aws-c-common/source/resource_name.c
deleted file mode 100644
index 0a7b972ea1..0000000000
--- a/contrib/restricted/aws/aws-c-common/source/resource_name.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/resource_name.h>
-
-#define ARN_SPLIT_COUNT ((size_t)5)
-#define ARN_PARTS_COUNT ((size_t)6)
-
-static const char ARN_DELIMETER[] = ":";
-static const char ARN_DELIMETER_CHAR = ':';
-
-static const size_t DELIMETER_LEN = 8; /* strlen("arn:::::") */
-
-AWS_COMMON_API
-int aws_resource_name_init_from_cur(struct aws_resource_name *arn, const struct aws_byte_cursor *input) {
- struct aws_byte_cursor arn_parts[ARN_PARTS_COUNT];
- struct aws_array_list arn_part_list;
- aws_array_list_init_static(&arn_part_list, arn_parts, ARN_PARTS_COUNT, sizeof(struct aws_byte_cursor));
- if (aws_byte_cursor_split_on_char_n(input, ARN_DELIMETER_CHAR, ARN_SPLIT_COUNT, &arn_part_list)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
-
- struct aws_byte_cursor *arn_prefix;
- if (aws_array_list_get_at_ptr(&arn_part_list, (void **)&arn_prefix, 0) ||
- !aws_byte_cursor_eq_c_str(arn_prefix, "arn")) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->partition, 1)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->service, 2)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->region, 3)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->account_id, 4) || aws_byte_cursor_eq_c_str(&arn->account_id, "")) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->resource_id, 5)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- return AWS_OP_SUCCESS;
-}
-
-AWS_COMMON_API
-int aws_resource_name_length(const struct aws_resource_name *arn, size_t *size) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id));
-
- *size = arn->partition.len + arn->region.len + arn->service.len + arn->account_id.len + arn->resource_id.len +
- DELIMETER_LEN;
-
- return AWS_OP_SUCCESS;
-}
-
-AWS_COMMON_API
-int aws_byte_buf_append_resource_name(struct aws_byte_buf *buf, const struct aws_resource_name *arn) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id));
-
- const struct aws_byte_cursor prefix = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:");
- const struct aws_byte_cursor colon_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(ARN_DELIMETER);
-
- if (aws_byte_buf_append(buf, &prefix)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &arn->partition)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &colon_cur)) {
- return aws_raise_error(aws_last_error());
- }
-
- if (aws_byte_buf_append(buf, &arn->service)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &colon_cur)) {
- return aws_raise_error(aws_last_error());
- }
-
- if (aws_byte_buf_append(buf, &arn->region)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &colon_cur)) {
- return aws_raise_error(aws_last_error());
- }
-
- if (aws_byte_buf_append(buf, &arn->account_id)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &colon_cur)) {
- return aws_raise_error(aws_last_error());
- }
-
- if (aws_byte_buf_append(buf, &arn->resource_id)) {
- return aws_raise_error(aws_last_error());
- }
-
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
- return AWS_OP_SUCCESS;
-}
diff --git a/contrib/restricted/aws/aws-c-common/source/ring_buffer.c b/contrib/restricted/aws/aws-c-common/source/ring_buffer.c
index 6ebecebf47..bcc8ffaad3 100644
--- a/contrib/restricted/aws/aws-c-common/source/ring_buffer.c
+++ b/contrib/restricted/aws/aws-c-common/source/ring_buffer.c
@@ -73,7 +73,7 @@ int aws_ring_buffer_acquire(struct aws_ring_buffer *ring_buf, size_t requested_s
/* this branch is, we don't have any vended buffers. */
if (head_cpy == tail_cpy) {
- size_t ring_space = ring_buf->allocation_end - ring_buf->allocation;
+ size_t ring_space = ring_buf->allocation_end == NULL ? 0 : ring_buf->allocation_end - ring_buf->allocation;
if (requested_size > ring_space) {
AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
@@ -147,7 +147,7 @@ int aws_ring_buffer_acquire_up_to(
/* this branch is, we don't have any vended buffers. */
if (head_cpy == tail_cpy) {
- size_t ring_space = ring_buf->allocation_end - ring_buf->allocation;
+ size_t ring_space = ring_buf->allocation_end == NULL ? 0 : ring_buf->allocation_end - ring_buf->allocation;
size_t allocation_size = ring_space > requested_size ? requested_size : ring_space;
@@ -232,10 +232,11 @@ static inline bool s_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buff
#ifdef CBMC
/* only continue if buf points-into ring_buffer because comparison of pointers to different objects is undefined
* (C11 6.5.8) */
- if (!__CPROVER_same_object(buf->buffer, ring_buffer->allocation) ||
- !__CPROVER_same_object(buf->buffer, ring_buffer->allocation_end - 1)) {
- return false;
- }
+ return (
+ __CPROVER_same_object(buf->buffer, ring_buffer->allocation) &&
+ AWS_IMPLIES(
+ ring_buffer->allocation_end != NULL, __CPROVER_same_object(buf->buffer, ring_buffer->allocation_end - 1)));
+
#endif
return buf->buffer && ring_buffer->allocation && ring_buffer->allocation_end &&
buf->buffer >= ring_buffer->allocation && buf->buffer + buf->capacity <= ring_buffer->allocation_end;
@@ -258,64 +259,3 @@ bool aws_ring_buffer_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buff
AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
return rval;
}
-
-/* Ring buffer allocator implementation */
-static void *s_ring_buffer_mem_acquire(struct aws_allocator *allocator, size_t size) {
- struct aws_ring_buffer *buffer = allocator->impl;
- struct aws_byte_buf buf;
- AWS_ZERO_STRUCT(buf);
- /* allocate extra space for the size */
- if (aws_ring_buffer_acquire(buffer, size + sizeof(size_t), &buf)) {
- return NULL;
- }
- /* store the size ahead of the allocation */
- *((size_t *)buf.buffer) = buf.capacity;
- return buf.buffer + sizeof(size_t);
-}
-
-static void s_ring_buffer_mem_release(struct aws_allocator *allocator, void *ptr) {
- /* back up to where the size is stored */
- const void *addr = ((uint8_t *)ptr - sizeof(size_t));
- const size_t size = *((size_t *)addr);
-
- struct aws_byte_buf buf = aws_byte_buf_from_array(addr, size);
- buf.allocator = allocator;
-
- struct aws_ring_buffer *buffer = allocator->impl;
- aws_ring_buffer_release(buffer, &buf);
-}
-
-static void *s_ring_buffer_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
- void *mem = s_ring_buffer_mem_acquire(allocator, num * size);
- if (!mem) {
- return NULL;
- }
- memset(mem, 0, num * size);
- return mem;
-}
-
-static void *s_ring_buffer_mem_realloc(struct aws_allocator *allocator, void *ptr, size_t old_size, size_t new_size) {
- (void)allocator;
- (void)ptr;
- (void)old_size;
- (void)new_size;
- AWS_FATAL_ASSERT(!"ring_buffer_allocator does not support realloc, as it breaks allocation ordering");
- return NULL;
-}
-
-int aws_ring_buffer_allocator_init(struct aws_allocator *allocator, struct aws_ring_buffer *ring_buffer) {
- if (allocator == NULL || ring_buffer == NULL) {
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- allocator->impl = ring_buffer;
- allocator->mem_acquire = s_ring_buffer_mem_acquire;
- allocator->mem_release = s_ring_buffer_mem_release;
- allocator->mem_calloc = s_ring_buffer_mem_calloc;
- allocator->mem_realloc = s_ring_buffer_mem_realloc;
- return AWS_OP_SUCCESS;
-}
-
-void aws_ring_buffer_allocator_clean_up(struct aws_allocator *allocator) {
- AWS_ZERO_STRUCT(*allocator);
-}
diff --git a/contrib/restricted/aws/aws-c-common/source/string.c b/contrib/restricted/aws/aws-c-common/source/string.c
index d1abf0dbff..a3d2c204ed 100644
--- a/contrib/restricted/aws/aws-c-common/source/string.c
+++ b/contrib/restricted/aws/aws-c-common/source/string.c
@@ -4,6 +4,183 @@
*/
#include <aws/common/string.h>
+#ifdef _WIN32
+# include <windows.h>
+
+struct aws_wstring *aws_string_convert_to_wstring(
+ struct aws_allocator *allocator,
+ const struct aws_string *to_convert) {
+ AWS_PRECONDITION(to_convert);
+
+ struct aws_byte_cursor convert_cur = aws_byte_cursor_from_string(to_convert);
+ return aws_string_convert_to_wchar_from_byte_cursor(allocator, &convert_cur);
+}
+
+struct aws_wstring *aws_string_convert_to_wchar_from_byte_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *to_convert) {
+ AWS_PRECONDITION(to_convert);
+
+ /* because an explicit length is passed for to_convert, the converted size returned here does not
+ * include the null terminator, which is what we want. */
+ int converted_size = MultiByteToWideChar(CP_UTF8, 0, (const char *)to_convert->ptr, (int)to_convert->len, NULL, 0);
+
+ if (!converted_size) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ size_t str_len_size = 0;
+ size_t malloc_size = 0;
+
+ /* multiply by sizeof(wchar_t): the return value above is a character count, not a byte count. */
+ if (aws_mul_size_checked(sizeof(wchar_t), converted_size, &str_len_size)) {
+ return NULL;
+ }
+
+ /* UTF-16, the NULL terminator is two bytes. */
+ if (aws_add_size_checked(sizeof(struct aws_wstring) + 2, str_len_size, &malloc_size)) {
+ return NULL;
+ }
+
+ struct aws_wstring *str = aws_mem_acquire(allocator, malloc_size);
+ if (!str) {
+ return NULL;
+ }
+
+ /* Fields are declared const, so we need to copy them in like this */
+ *(struct aws_allocator **)(&str->allocator) = allocator;
+ *(size_t *)(&str->len) = (size_t)converted_size;
+
+ int converted_res = MultiByteToWideChar(
+ CP_UTF8, 0, (const char *)to_convert->ptr, (int)to_convert->len, (wchar_t *)str->bytes, converted_size);
+ /* windows had its chance to do its thing, no take backsies. */
+ AWS_FATAL_ASSERT(converted_res > 0);
+
+ *(wchar_t *)&str->bytes[converted_size] = 0;
+ return str;
+}
+
+struct aws_wstring *aws_wstring_new_from_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *w_str_cur) {
+ AWS_PRECONDITION(allocator && aws_byte_cursor_is_valid(w_str_cur));
+ return aws_wstring_new_from_array(allocator, (wchar_t *)w_str_cur->ptr, w_str_cur->len / sizeof(wchar_t));
+}
+
+struct aws_wstring *aws_wstring_new_from_array(struct aws_allocator *allocator, const wchar_t *w_str, size_t len) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(AWS_MEM_IS_READABLE(w_str, len));
+
+ size_t str_byte_len = 0;
+ size_t malloc_size = 0;
+
+ /* multiply by sizeof(wchar_t): the return value above is a character count, not a byte count. */
+ if (aws_mul_size_checked(sizeof(wchar_t), len, &str_byte_len)) {
+ return NULL;
+ }
+
+ /* UTF-16, the NULL terminator is two bytes. */
+ if (aws_add_size_checked(sizeof(struct aws_wstring) + 2, str_byte_len, &malloc_size)) {
+ return NULL;
+ }
+
+ struct aws_wstring *str = aws_mem_acquire(allocator, malloc_size);
+ if (!str) {
+ return NULL;
+ }
+
+ /* Fields are declared const, so we need to copy them in like this */
+ *(struct aws_allocator **)(&str->allocator) = allocator;
+ *(size_t *)(&str->len) = len;
+ if (len > 0) {
+ memcpy((void *)str->bytes, w_str, str_byte_len);
+ }
+ /* in case this is a utf-16 string in the array, allow that here. */
+ *(wchar_t *)&str->bytes[len] = 0;
+ AWS_RETURN_WITH_POSTCONDITION(str, aws_wstring_is_valid(str));
+}
+
+bool aws_wstring_is_valid(const struct aws_wstring *str) {
+ return str && AWS_MEM_IS_READABLE(&str->bytes[0], str->len + 1) && str->bytes[str->len] == 0;
+}
+
+void aws_wstring_destroy(struct aws_wstring *str) {
+ AWS_PRECONDITION(!str || aws_wstring_is_valid(str));
+ if (str && str->allocator) {
+ aws_mem_release(str->allocator, str);
+ }
+}
+
+static struct aws_string *s_convert_from_wchar(
+ struct aws_allocator *allocator,
+ const wchar_t *to_convert,
+ int len_chars) {
+ AWS_FATAL_PRECONDITION(to_convert);
+
+ int bytes_size = WideCharToMultiByte(CP_UTF8, 0, to_convert, len_chars, NULL, 0, NULL, NULL);
+
+ if (!bytes_size) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ size_t malloc_size = 0;
+
+ /* bytes_size already contains the space for the null terminator */
+ if (aws_add_size_checked(sizeof(struct aws_string), bytes_size, &malloc_size)) {
+ return NULL;
+ }
+
+ struct aws_string *str = aws_mem_acquire(allocator, malloc_size);
+ if (!str) {
+ return NULL;
+ }
+
+ /* Fields are declared const, so we need to copy them in like this */
+ *(struct aws_allocator **)(&str->allocator) = allocator;
+ *(size_t *)(&str->len) = (size_t)bytes_size - 1;
+
+ int converted_res =
+ WideCharToMultiByte(CP_UTF8, 0, to_convert, len_chars, (char *)str->bytes, bytes_size, NULL, NULL);
+ /* windows had its chance to do its thing, no take backsies. */
+ AWS_FATAL_ASSERT(converted_res > 0);
+
+ *(uint8_t *)&str->bytes[str->len] = 0;
+ return str;
+}
+
+struct aws_string *aws_string_convert_from_wchar_str(
+ struct aws_allocator *allocator,
+ const struct aws_wstring *to_convert) {
+ AWS_FATAL_PRECONDITION(to_convert);
+
+ return s_convert_from_wchar(allocator, aws_wstring_c_str(to_convert), (int)aws_wstring_num_chars(to_convert));
+}
+struct aws_string *aws_string_convert_from_wchar_c_str(struct aws_allocator *allocator, const wchar_t *to_convert) {
+ return s_convert_from_wchar(allocator, to_convert, -1);
+}
+
+const wchar_t *aws_wstring_c_str(const struct aws_wstring *str) {
+ AWS_PRECONDITION(str);
+ return str->bytes;
+}
+
+size_t aws_wstring_num_chars(const struct aws_wstring *str) {
+ AWS_PRECONDITION(str);
+
+ if (str->len == 0) {
+ return 0;
+ }
+
+ return str->len;
+}
+
+size_t aws_wstring_size_bytes(const struct aws_wstring *str) {
+ AWS_PRECONDITION(str);
+
+ return aws_wstring_num_chars(str) * sizeof(wchar_t);
+}
+
+#endif /* _WIN32 */
+
struct aws_string *aws_string_new_from_c_str(struct aws_allocator *allocator, const char *c_str) {
AWS_PRECONDITION(allocator && c_str);
return aws_string_new_from_array(allocator, (const uint8_t *)c_str, strlen(c_str));
@@ -27,7 +204,7 @@ struct aws_string *aws_string_new_from_array(struct aws_allocator *allocator, co
if (len > 0) {
memcpy((void *)str->bytes, bytes, len);
}
- *(uint8_t *)&str->bytes[len] = '\0';
+ *(uint8_t *)&str->bytes[len] = 0;
AWS_RETURN_WITH_POSTCONDITION(str, aws_string_is_valid(str));
}
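
The wide-string helpers exist so UTF-8 paths can round-trip through the W-suffixed Win32 APIs. A Windows-only sketch (the helper name is illustrative, not part of the patch):

    #ifdef _WIN32
    # include <aws/common/string.h>

    static struct aws_string *s_utf8_roundtrip(struct aws_allocator *allocator, const struct aws_string *utf8_path) {
        struct aws_wstring *wide = aws_string_convert_to_wstring(allocator, utf8_path);
        if (!wide) {
            return NULL; /* AWS_ERROR_INVALID_ARGUMENT was raised for malformed UTF-8 */
        }

        /* aws_wstring_c_str(wide) is what you would hand to a W-suffixed Win32 call */
        struct aws_string *back = aws_string_convert_from_wchar_str(allocator, wide);
        aws_wstring_destroy(wide);
        return back;
    }
    #endif /* _WIN32 */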
diff --git a/contrib/restricted/aws/aws-c-common/source/task_scheduler.c b/contrib/restricted/aws/aws-c-common/source/task_scheduler.c
index 31ce7af1ab..4467b12493 100644
--- a/contrib/restricted/aws/aws-c-common/source/task_scheduler.c
+++ b/contrib/restricted/aws/aws-c-common/source/task_scheduler.c
@@ -40,6 +40,7 @@ void aws_task_run(struct aws_task *task, enum aws_task_status status) {
task->type_tag,
aws_task_status_to_c_str(status));
+ task->abi_extension.scheduled = false;
task->fn(task, task->arg, status);
}
@@ -139,6 +140,7 @@ void aws_task_scheduler_schedule_now(struct aws_task_scheduler *scheduler, struc
task->timestamp = 0;
aws_linked_list_push_back(&scheduler->asap_list, &task->node);
+ task->abi_extension.scheduled = true;
}
void aws_task_scheduler_schedule_future(
@@ -177,6 +179,7 @@ void aws_task_scheduler_schedule_future(
}
aws_linked_list_insert_before(node_i, &task->node);
}
+ task->abi_extension.scheduled = true;
}
void aws_task_scheduler_run_all(struct aws_task_scheduler *scheduler, uint64_t current_time) {
@@ -253,7 +256,7 @@ void aws_task_scheduler_cancel_task(struct aws_task_scheduler *scheduler, struct
*/
if (task->node.next) {
aws_linked_list_remove(&task->node);
- } else {
+ } else if (task->abi_extension.scheduled) {
aws_priority_queue_remove(&scheduler->timed_queue, &task, &task->priority_queue_node);
}
diff --git a/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c b/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c
new file mode 100644
index 0000000000..7999344b7b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/thread_scheduler.c
@@ -0,0 +1,225 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/clock.h>
+#include <aws/common/condition_variable.h>
+#include <aws/common/mutex.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/task_scheduler.h>
+#include <aws/common/thread.h>
+#include <aws/common/thread_scheduler.h>
+
+struct aws_thread_scheduler {
+ struct aws_allocator *allocator;
+ struct aws_ref_count ref_count;
+ struct aws_thread thread;
+ struct aws_task_scheduler scheduler;
+ struct aws_atomic_var should_exit;
+
+ struct {
+ struct aws_linked_list scheduling_queue;
+ struct aws_linked_list cancel_queue;
+ struct aws_mutex mutex;
+ struct aws_condition_variable c_var;
+ } thread_data;
+};
+
+struct cancellation_node {
+ struct aws_task *task_to_cancel;
+ struct aws_linked_list_node node;
+};
+
+static void s_destroy_callback(void *arg) {
+ struct aws_thread_scheduler *scheduler = arg;
+ aws_atomic_store_int(&scheduler->should_exit, 1u);
+ aws_condition_variable_notify_all(&scheduler->thread_data.c_var);
+ aws_thread_join(&scheduler->thread);
+ aws_task_scheduler_clean_up(&scheduler->scheduler);
+ aws_condition_variable_clean_up(&scheduler->thread_data.c_var);
+ aws_mutex_clean_up(&scheduler->thread_data.mutex);
+ aws_thread_clean_up(&scheduler->thread);
+ aws_mem_release(scheduler->allocator, scheduler);
+}
+
+static bool s_thread_should_wake(void *arg) {
+ struct aws_thread_scheduler *scheduler = arg;
+
+ uint64_t current_time = 0;
+ aws_high_res_clock_get_ticks(&current_time);
+
+ uint64_t next_scheduled_task = 0;
+ aws_task_scheduler_has_tasks(&scheduler->scheduler, &next_scheduled_task);
+ return aws_atomic_load_int(&scheduler->should_exit) ||
+ !aws_linked_list_empty(&scheduler->thread_data.scheduling_queue) ||
+ !aws_linked_list_empty(&scheduler->thread_data.cancel_queue) || (next_scheduled_task <= current_time);
+}
+
+static void s_thread_fn(void *arg) {
+ struct aws_thread_scheduler *scheduler = arg;
+
+ while (!aws_atomic_load_int(&scheduler->should_exit)) {
+
+ /* move tasks from the mutex protected list to the scheduler. This is because we don't want to hold the lock
+ * for the scheduler during run_all and then try and acquire the lock from another thread to schedule something
+ * because that potentially would block the calling thread. */
+ struct aws_linked_list list_cpy;
+ aws_linked_list_init(&list_cpy);
+ struct aws_linked_list cancel_list_cpy;
+ aws_linked_list_init(&cancel_list_cpy);
+
+ AWS_FATAL_ASSERT(!aws_mutex_lock(&scheduler->thread_data.mutex) && "mutex lock failed!");
+ aws_linked_list_swap_contents(&scheduler->thread_data.scheduling_queue, &list_cpy);
+ aws_linked_list_swap_contents(&scheduler->thread_data.cancel_queue, &cancel_list_cpy);
+ AWS_FATAL_ASSERT(!aws_mutex_unlock(&scheduler->thread_data.mutex) && "mutex unlock failed!");
+
+ while (!aws_linked_list_empty(&list_cpy)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&list_cpy);
+ struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node);
+ if (task->timestamp) {
+ aws_task_scheduler_schedule_future(&scheduler->scheduler, task, task->timestamp);
+ } else {
+ aws_task_scheduler_schedule_now(&scheduler->scheduler, task);
+ }
+ }
+
+ /* now cancel the tasks. */
+ while (!aws_linked_list_empty(&cancel_list_cpy)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&cancel_list_cpy);
+ struct cancellation_node *cancellation_node = AWS_CONTAINER_OF(node, struct cancellation_node, node);
+ aws_task_scheduler_cancel_task(&scheduler->scheduler, cancellation_node->task_to_cancel);
+ aws_mem_release(scheduler->allocator, cancellation_node);
+ }
+
+ /* now run everything */
+ uint64_t current_time = 0;
+ aws_high_res_clock_get_ticks(&current_time);
+ aws_task_scheduler_run_all(&scheduler->scheduler, current_time);
+
+ uint64_t next_scheduled_task = 0;
+ aws_task_scheduler_has_tasks(&scheduler->scheduler, &next_scheduled_task);
+
+ int64_t timeout = 0;
+ if (next_scheduled_task == UINT64_MAX) {
+ /* at least wake up once per 30 seconds. */
+ timeout = (int64_t)30 * (int64_t)AWS_TIMESTAMP_NANOS;
+ } else {
+ timeout = (int64_t)(next_scheduled_task - current_time);
+ }
+
+ if (timeout > 0) {
+ AWS_FATAL_ASSERT(!aws_mutex_lock(&scheduler->thread_data.mutex) && "mutex lock failed!");
+
+ aws_condition_variable_wait_for_pred(
+ &scheduler->thread_data.c_var, &scheduler->thread_data.mutex, timeout, s_thread_should_wake, scheduler);
+ AWS_FATAL_ASSERT(!aws_mutex_unlock(&scheduler->thread_data.mutex) && "mutex unlock failed!");
+ }
+ }
+}
+
+struct aws_thread_scheduler *aws_thread_scheduler_new(
+ struct aws_allocator *allocator,
+ const struct aws_thread_options *thread_options) {
+ struct aws_thread_scheduler *scheduler = aws_mem_calloc(allocator, 1, sizeof(struct aws_thread_scheduler));
+
+ if (!scheduler) {
+ return NULL;
+ }
+
+ if (aws_thread_init(&scheduler->thread, allocator)) {
+ goto clean_up;
+ }
+
+ AWS_FATAL_ASSERT(!aws_mutex_init(&scheduler->thread_data.mutex) && "mutex init failed!");
+ AWS_FATAL_ASSERT(!aws_condition_variable_init(&scheduler->thread_data.c_var) && "condition variable init failed!");
+
+ if (aws_task_scheduler_init(&scheduler->scheduler, allocator)) {
+ goto thread_init;
+ }
+
+ scheduler->allocator = allocator;
+ aws_atomic_init_int(&scheduler->should_exit, 0u);
+ aws_ref_count_init(&scheduler->ref_count, scheduler, s_destroy_callback);
+ aws_linked_list_init(&scheduler->thread_data.scheduling_queue);
+ aws_linked_list_init(&scheduler->thread_data.cancel_queue);
+
+ if (aws_thread_launch(&scheduler->thread, s_thread_fn, scheduler, thread_options)) {
+ goto scheduler_init;
+ }
+
+ return scheduler;
+
+scheduler_init:
+ aws_task_scheduler_clean_up(&scheduler->scheduler);
+
+thread_init:
+ aws_condition_variable_clean_up(&scheduler->thread_data.c_var);
+ aws_mutex_clean_up(&scheduler->thread_data.mutex);
+ aws_thread_clean_up(&scheduler->thread);
+
+clean_up:
+ aws_mem_release(allocator, scheduler);
+
+ return NULL;
+}
+
+void aws_thread_scheduler_acquire(struct aws_thread_scheduler *scheduler) {
+ aws_ref_count_acquire(&scheduler->ref_count);
+}
+
+void aws_thread_scheduler_release(const struct aws_thread_scheduler *scheduler) {
+ aws_ref_count_release((struct aws_ref_count *)&scheduler->ref_count);
+}
+
+void aws_thread_scheduler_schedule_future(
+ struct aws_thread_scheduler *scheduler,
+ struct aws_task *task,
+ uint64_t time_to_run) {
+ task->timestamp = time_to_run;
+ AWS_FATAL_ASSERT(!aws_mutex_lock(&scheduler->thread_data.mutex) && "mutex lock failed!");
+ aws_linked_list_push_back(&scheduler->thread_data.scheduling_queue, &task->node);
+ AWS_FATAL_ASSERT(!aws_mutex_unlock(&scheduler->thread_data.mutex) && "mutex unlock failed!");
+ aws_condition_variable_notify_one(&scheduler->thread_data.c_var);
+}
+void aws_thread_scheduler_schedule_now(struct aws_thread_scheduler *scheduler, struct aws_task *task) {
+ aws_thread_scheduler_schedule_future(scheduler, task, 0u);
+}
+
+void aws_thread_scheduler_cancel_task(struct aws_thread_scheduler *scheduler, struct aws_task *task) {
+ struct cancellation_node *cancellation_node =
+ aws_mem_calloc(scheduler->allocator, 1, sizeof(struct cancellation_node));
+ AWS_FATAL_ASSERT(cancellation_node && "allocation failed for cancellation node!");
+ AWS_FATAL_ASSERT(!aws_mutex_lock(&scheduler->thread_data.mutex) && "mutex lock failed!");
+ struct aws_task *found_task = NULL;
+
+ /* remove tasks that are still in the scheduling queue, but haven't made it to the scheduler yet. */
+ struct aws_linked_list_node *node = aws_linked_list_empty(&scheduler->thread_data.scheduling_queue)
+ ? NULL
+ : aws_linked_list_front(&scheduler->thread_data.scheduling_queue);
+ while (node != NULL) {
+ struct aws_task *potential_task = AWS_CONTAINER_OF(node, struct aws_task, node);
+
+ if (potential_task == task) {
+ found_task = potential_task;
+ break;
+ }
+
+ if (aws_linked_list_node_next_is_valid(node)) {
+ node = aws_linked_list_next(node);
+ } else {
+ node = NULL;
+ }
+ }
+
+ if (found_task) {
+ aws_linked_list_remove(&found_task->node);
+ }
+
+ cancellation_node->task_to_cancel = task;
+
+ /* regardless put it in the cancel queue so the thread can call the task with canceled status. */
+ aws_linked_list_push_back(&scheduler->thread_data.cancel_queue, &cancellation_node->node);
+ AWS_FATAL_ASSERT(!aws_mutex_unlock(&scheduler->thread_data.mutex) && "mutex unlock failed!");
+ /* notify so the loop knows to wake up and process the cancellations. */
+ aws_condition_variable_notify_one(&scheduler->thread_data.c_var);
+}
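+
+/* Usage sketch (not part of the library): the lifecycle of the API above is
+ * new -> schedule -> (optionally cancel) -> release. The names s_example_task_fn
+ * and s_example_usage are hypothetical; the task must remain valid until it
+ * runs or is cancelled. */
+static void s_example_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ (void)arg;
+ /* status is AWS_TASK_STATUS_RUN_READY on a normal run, or
+ * AWS_TASK_STATUS_CANCELED if aws_thread_scheduler_cancel_task() won the race. */
+ (void)status;
+}
+
+static void s_example_usage(struct aws_allocator *allocator, struct aws_task *task, uint64_t run_at_nanos) {
+ struct aws_thread_scheduler *scheduler = aws_thread_scheduler_new(allocator, NULL /* default thread options */);
+ aws_task_init(task, s_example_task_fn, NULL /* arg */, "example");
+ aws_thread_scheduler_schedule_future(scheduler, task, run_at_nanos);
+ /* ... later, if the task must not run: */
+ aws_thread_scheduler_cancel_task(scheduler, task);
+ aws_thread_scheduler_release(scheduler);
+}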
diff --git a/contrib/restricted/aws/aws-c-common/source/thread_shared.c b/contrib/restricted/aws/aws-c-common/source/thread_shared.c
new file mode 100644
index 0000000000..a0d19adfe0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-common/source/thread_shared.c
@@ -0,0 +1,167 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/private/thread_shared.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/condition_variable.h>
+#include <aws/common/linked_list.h>
+#include <aws/common/mutex.h>
+
+/*
+ * lock guarding the unjoined thread count and pending join list
+ */
+static struct aws_mutex s_managed_thread_lock = AWS_MUTEX_INIT;
+static struct aws_condition_variable s_managed_thread_signal = AWS_CONDITION_VARIABLE_INIT;
+static uint64_t s_default_managed_join_timeout_ns = 0;
+
+/*
+ * The number of successfully launched managed threads (or event loop threads which participate by inc/dec) that
+ * have not been joined yet.
+ */
+static uint32_t s_unjoined_thread_count = 0;
+
+/*
+ * A list of thread_wrapper structs for threads whose thread function has finished but join has not been called
+ * yet for the thread.
+ *
+ * This list only ever contains at most one entry.
+ */
+static struct aws_linked_list s_pending_join_managed_threads;
+
+void aws_thread_increment_unjoined_count(void) {
+ aws_mutex_lock(&s_managed_thread_lock);
+ ++s_unjoined_thread_count;
+ aws_mutex_unlock(&s_managed_thread_lock);
+}
+
+void aws_thread_decrement_unjoined_count(void) {
+ aws_mutex_lock(&s_managed_thread_lock);
+ --s_unjoined_thread_count;
+ aws_mutex_unlock(&s_managed_thread_lock);
+ aws_condition_variable_notify_one(&s_managed_thread_signal);
+}
+
+size_t aws_thread_get_managed_thread_count(void) {
+ size_t thread_count = 0;
+ aws_mutex_lock(&s_managed_thread_lock);
+ thread_count = s_unjoined_thread_count;
+ aws_mutex_unlock(&s_managed_thread_lock);
+
+ return thread_count;
+}
+
+static bool s_one_or_fewer_managed_threads_unjoined(void *context) {
+ (void)context;
+ return s_unjoined_thread_count <= 1;
+}
+
+void aws_thread_set_managed_join_timeout_ns(uint64_t timeout_in_ns) {
+ aws_mutex_lock(&s_managed_thread_lock);
+ s_default_managed_join_timeout_ns = timeout_in_ns;
+ aws_mutex_unlock(&s_managed_thread_lock);
+}
+
+int aws_thread_join_all_managed(void) {
+ struct aws_linked_list join_list;
+
+ aws_mutex_lock(&s_managed_thread_lock);
+ uint64_t timeout_in_ns = s_default_managed_join_timeout_ns;
+ aws_mutex_unlock(&s_managed_thread_lock);
+
+ uint64_t now_in_ns = 0;
+ uint64_t timeout_timestamp_ns = 0;
+ if (timeout_in_ns > 0) {
+ aws_sys_clock_get_ticks(&now_in_ns);
+ timeout_timestamp_ns = now_in_ns + timeout_in_ns;
+ }
+
+ bool successful = true;
+ bool done = false;
+ while (!done) {
+ aws_mutex_lock(&s_managed_thread_lock);
+
+ /*
+ * We lazily join old threads as newer ones finish their thread function. This means that when called from
+ * the main thread, there will always be one last thread (whichever completion serialized last) that is our
+ * responsibility to join (as long as at least one managed thread was created). So we wait for a count <= 1
+ * rather than what you'd normally expect (0).
+ *
+ * Absent a timeout, we only terminate if there are no threads left so it is possible to spin-wait a while
+ * if there is a single thread still running.
+ */
+ if (timeout_timestamp_ns > 0) {
+ uint64_t wait_ns = 0;
+
+ /*
+ * now_in_ns is always refreshed right before this point: either outside the loop before the first
+ * iteration, or after the previous wait, when the overall timeout was checked.
+ */
+ if (now_in_ns <= timeout_timestamp_ns) {
+ wait_ns = timeout_timestamp_ns - now_in_ns;
+ }
+
+ aws_condition_variable_wait_for_pred(
+ &s_managed_thread_signal,
+ &s_managed_thread_lock,
+ wait_ns,
+ s_one_or_fewer_managed_threads_unjoined,
+ NULL);
+ } else {
+ aws_condition_variable_wait_pred(
+ &s_managed_thread_signal, &s_managed_thread_lock, s_one_or_fewer_managed_threads_unjoined, NULL);
+ }
+
+ done = s_unjoined_thread_count == 0;
+
+ aws_sys_clock_get_ticks(&now_in_ns);
+ if (timeout_timestamp_ns != 0 && now_in_ns >= timeout_timestamp_ns) {
+ done = true;
+ successful = false;
+ }
+
+ aws_linked_list_init(&join_list);
+
+ aws_linked_list_swap_contents(&join_list, &s_pending_join_managed_threads);
+
+ aws_mutex_unlock(&s_managed_thread_lock);
+
+ /*
+ * Join against any finished threads. These threads are guaranteed to:
+ * (1) Not be the current thread
+ * (2) Have already run to user thread_function completion
+ *
+ * The number of finished threads on any iteration is at most one.
+ */
+ aws_thread_join_and_free_wrapper_list(&join_list);
+ }
+
+ return successful ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
+
+void aws_thread_pending_join_add(struct aws_linked_list_node *node) {
+ struct aws_linked_list join_list;
+ aws_linked_list_init(&join_list);
+
+ aws_mutex_lock(&s_managed_thread_lock);
+ /*
+ * Swap out the pending join threads before adding this, otherwise we'd join against ourselves which won't work
+ */
+ aws_linked_list_swap_contents(&join_list, &s_pending_join_managed_threads);
+ aws_linked_list_push_back(&s_pending_join_managed_threads, node);
+ aws_mutex_unlock(&s_managed_thread_lock);
+
+ /*
+ * Join against any finished threads. There is only ever at most one such thread, and it is
+ * guaranteed to:
+ * (1) Not be the current thread
+ * (2) Have already run to user thread_function completion
+ */
+ aws_thread_join_and_free_wrapper_list(&join_list);
+}
+
+void aws_thread_initialize_thread_management(void) {
+ aws_linked_list_init(&s_pending_join_managed_threads);
+}
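+
+/* Usage sketch (not part of the library): a typical process-shutdown path.
+ * Both functions below are declared in aws/common/thread.h; a zero timeout
+ * (the default) means "wait indefinitely". */
+static int s_example_shutdown(void) {
+ /* give straggler managed threads up to 5 seconds before giving up. */
+ aws_thread_set_managed_join_timeout_ns((uint64_t)5 * 1000000000ULL);
+ if (aws_thread_join_all_managed() != AWS_OP_SUCCESS) {
+ return AWS_OP_ERR; /* timed out: at least one managed thread is still running */
+ }
+ return AWS_OP_SUCCESS;
+}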
diff --git a/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.copyrights.report b/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.copyrights.report
index 86dc616078..e1a77b5cc8 100644
--- a/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.copyrights.report
+++ b/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.copyrights.report
@@ -51,7 +51,9 @@ BELONGS ya.make
include/aws/io/logging.h [5:5]
include/aws/io/message_pool.h [4:4]
include/aws/io/pipe.h [5:5]
- include/aws/io/pki_utils.h [4:4]
+ include/aws/io/pkcs11.h [4:4]
+ include/aws/io/private/pem_utils.h [5:5]
+ include/aws/io/private/pki_utils.h [4:4]
include/aws/io/private/tls_channel_handler_shared.h [4:4]
include/aws/io/retry_strategy.h [4:4]
include/aws/io/shared_library.h [5:5]
@@ -67,13 +69,14 @@ BELONGS ya.make
source/channel_bootstrap.c [2:2]
source/event_loop.c [2:2]
source/exponential_backoff_retry_strategy.c [2:2]
- source/file_utils_shared.c [2:2]
source/host_resolver.c [2:2]
source/io.c [2:2]
source/linux/epoll_event_loop.c [2:2]
source/message_pool.c [2:2]
+ source/pem_utils.c [2:2]
+ source/pkcs11.c [2:2]
+ source/pkcs11_private.h [5:5]
source/pki_utils.c [2:2]
- source/posix/file_utils.c [2:2]
source/posix/host_resolver.c [2:2]
source/posix/pipe.c [2:2]
source/posix/shared_library.c [2:2]
@@ -81,8 +84,33 @@ BELONGS ya.make
source/retry_strategy.c [2:2]
source/s2n/s2n_tls_channel_handler.c [2:2]
source/socket_channel_handler.c [2:2]
+ source/standard_retry_strategy.c [2:2]
source/statistics.c [2:2]
source/stream.c [2:2]
source/tls_channel_handler.c [2:2]
source/tls_channel_handler_shared.c [2:2]
source/uri.c [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL 2e35409d3a27ad4f26ee063de186689a
+BELONGS ya.make
+ License text:
+ Copyright (c) OASIS Open 2016. All Rights Reserved.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ THIRD-PARTY-LICENSES.txt [3:3]
+
+KEEP COPYRIGHT_SERVICE_LABEL b3779e02b4352ffc4ebd94151bac83c8
+BELONGS ya.make
+ License text:
+ /* Copyright (c) OASIS Open 2016. All Rights Reserved./
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ source/pkcs11/v2.40/pkcs11.h [1:1]
+ source/pkcs11/v2.40/pkcs11f.h [1:1]
+ source/pkcs11/v2.40/pkcs11t.h [1:1]
diff --git a/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.licenses.report b/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.licenses.report
index c0eaaca480..a036141036 100644
--- a/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.licenses.report
+++ b/contrib/restricted/aws/aws-c-io/.yandex_meta/devtools.licenses.report
@@ -31,18 +31,18 @@
# FILE_INCLUDE - include all file data into licenses text file
# =======================
-KEEP Apache-2.0 1a2162d65587b1c6b4482cab8e65b94f
+KEEP Custom-Oasis-Pkcs11 2661c322534a1eac3a81a0e2c173c27e
BELONGS ya.make
- License text:
- \## License
- This library is licensed under the Apache 2.0 License.
+ Note: matched license text is too long. Read it in the source files.
Scancode info:
- Original SPDX id: Apache-2.0
+ Original SPDX id: LicenseRef-scancode-oasis-ipr-policy-2014
Score : 100.00
- Match type : NOTICE
- Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Match type : REFERENCE
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/oasis-ipr-policy-2014.LICENSE, https://www.oasis-open.org/policies-guidelines/ipr
Files with this license:
- README.md [13:15]
+ source/pkcs11/v2.40/pkcs11.h [2:5]
+ source/pkcs11/v2.40/pkcs11f.h [2:5]
+ source/pkcs11/v2.40/pkcs11t.h [2:5]
KEEP Apache-2.0 2b42edef8fa55315f34f2370b4715ca9
BELONGS ya.make
@@ -56,6 +56,19 @@ FILE_INCLUDE NOTICE found in files: LICENSE at line 107, LICENSE at line 110, LI
Files with this license:
LICENSE [2:202]
+KEEP Custom-Oasis-Pkcs11 34e6010c1f019f721ac79740e9f0a963
+BELONGS ya.make
+ License text:
+ FITNESS FOR A PARTICULAR PURPOSE. OASIS AND ITS MEMBERS WILL NOT BE LIABLE FOR
+ ANY DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-warranty-disclaimer
+ Score : 19.35
+ Match type : TEXT
+ Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/warranty-disclaimer.LICENSE
+ Files with this license:
+ THIRD-PARTY-LICENSES.txt [29:30]
+
KEEP Apache-2.0 43f57e875cdc02e8385ff667f85d702e
BELONGS ya.make
License text:
@@ -81,6 +94,19 @@ BELONGS ya.make
Files with this license:
CONTRIBUTING.md [61:61]
+KEEP Apache-2.0 6c901454b872854c0dea3ec06b67701a
+BELONGS ya.make
+ License text:
+ \## License
+ This library is licensed under the Apache 2.0 License.
+ Scancode info:
+ Original SPDX id: Apache-2.0
+ Score : 100.00
+ Match type : NOTICE
+ Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
+ Files with this license:
+ README.md [13:15]
+
KEEP Apache-2.0 d591512e466bb957030b8857f753349e
BELONGS ya.make
License text:
@@ -101,7 +127,9 @@ BELONGS ya.make
include/aws/io/logging.h [6:6]
include/aws/io/message_pool.h [5:5]
include/aws/io/pipe.h [6:6]
- include/aws/io/pki_utils.h [5:5]
+ include/aws/io/pkcs11.h [5:5]
+ include/aws/io/private/pem_utils.h [6:6]
+ include/aws/io/private/pki_utils.h [5:5]
include/aws/io/private/tls_channel_handler_shared.h [5:5]
include/aws/io/retry_strategy.h [5:5]
include/aws/io/shared_library.h [6:6]
@@ -117,13 +145,14 @@ BELONGS ya.make
source/channel_bootstrap.c [3:3]
source/event_loop.c [3:3]
source/exponential_backoff_retry_strategy.c [3:3]
- source/file_utils_shared.c [3:3]
source/host_resolver.c [3:3]
source/io.c [3:3]
source/linux/epoll_event_loop.c [3:3]
source/message_pool.c [3:3]
+ source/pem_utils.c [3:3]
+ source/pkcs11.c [3:3]
+ source/pkcs11_private.h [6:6]
source/pki_utils.c [3:3]
- source/posix/file_utils.c [3:3]
source/posix/host_resolver.c [3:3]
source/posix/pipe.c [3:3]
source/posix/shared_library.c [3:3]
@@ -131,12 +160,24 @@ BELONGS ya.make
source/retry_strategy.c [3:3]
source/s2n/s2n_tls_channel_handler.c [3:3]
source/socket_channel_handler.c [3:3]
+ source/standard_retry_strategy.c [3:3]
source/statistics.c [3:3]
source/stream.c [3:3]
source/tls_channel_handler.c [3:3]
source/tls_channel_handler_shared.c [3:3]
source/uri.c [3:3]
+KEEP Custom-Oasis-Pkcs11 e561d19ebbe9cbf3e19e2ad68aca5ade
+BELONGS ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: LicenseRef-scancode-ecma-documentation
+ Score : 93.43
+ Match type : TEXT
+ Links : http://www.ecma-international.org/publications/DISCLAIMER.pdf, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/ecma-documentation.LICENSE
+ Files with this license:
+ THIRD-PARTY-LICENSES.txt [10:29]
+
SKIP LicenseRef-scancode-generic-cla ee24fdc60600747c7d12c32055b0011d
BELONGS ya.make
# Skip CLA
diff --git a/contrib/restricted/aws/aws-c-io/.yandex_meta/licenses.list.txt b/contrib/restricted/aws/aws-c-io/.yandex_meta/licenses.list.txt
index 15c55379f0..aa9cdb63ae 100644
--- a/contrib/restricted/aws/aws-c-io/.yandex_meta/licenses.list.txt
+++ b/contrib/restricted/aws/aws-c-io/.yandex_meta/licenses.list.txt
@@ -215,9 +215,52 @@ This library is licensed under the Apache 2.0 License.
SPDX-License-Identifier: Apache-2.0.
====================COPYRIGHT====================
+/* Copyright (c) OASIS Open 2016. All Rights Reserved./
+
+
+====================COPYRIGHT====================
+Copyright (c) OASIS Open 2016. All Rights Reserved.
+
+
+====================COPYRIGHT====================
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+====================Custom-Oasis-Pkcs11====================
+ * /Distributed under the terms of the OASIS IPR Policy,
+ * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY
+ * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others.
+
+
+====================Custom-Oasis-Pkcs11====================
+FITNESS FOR A PARTICULAR PURPOSE. OASIS AND ITS MEMBERS WILL NOT BE LIABLE FOR
+ANY DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE
+
+
+====================Custom-Oasis-Pkcs11====================
+This document and translations of it may be copied and furnished to others, and
+derivative works that comment on or otherwise explain it or assist in its
+implementation may be prepared, copied, published, and distributed, in whole or
+in part, without restriction of any kind, provided that the above copyright
+notice and this section are included on all such copies and derivative works.
+However, this document itself may not be modified in any way, including by
+removing the copyright notice or references to OASIS, except as needed for the
+purpose of developing any document or deliverable produced by an OASIS
+Technical Committee (in which case the rules applicable to copyrights, as set
+forth in the OASIS IPR Policy, must be followed) or as required to translate it
+into languages other than English.
+
+The limited permissions granted above are perpetual and will not be revoked by
+OASIS or its successors or assigns.
+
+This document and the information contained herein is provided on an "AS IS"
+basis and OASIS DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT
+LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION HEREIN WILL NOT
+INFRINGE ANY OWNERSHIP RIGHTS OR ANY IMPLIED WARRANTIES OF MERCHANTABILITY OR
+FITNESS FOR A PARTICULAR PURPOSE. OASIS AND ITS MEMBERS WILL NOT BE LIABLE FOR
+
+
====================File: NOTICE====================
AWS C Io
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
diff --git a/contrib/restricted/aws/aws-c-io/CMakeLists.darwin.txt b/contrib/restricted/aws/aws-c-io/CMakeLists.darwin.txt
index 79441190a1..b3c484c982 100644
--- a/contrib/restricted/aws/aws-c-io/CMakeLists.darwin.txt
+++ b/contrib/restricted/aws/aws-c-io/CMakeLists.darwin.txt
@@ -8,6 +8,28 @@
add_library(restricted-aws-aws-c-io)
+target_compile_options(restricted-aws-aws-c-io PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_ADX
+ -DS2N_BIKE_R3_AVX2
+ -DS2N_BIKE_R3_AVX512
+ -DS2N_BIKE_R3_PCLMUL
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_HAVE_EXECINFO
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_SIKE_P434_R3_ASM
+ -DS2N___RESTRICT__SUPPORTED
+)
target_include_directories(restricted-aws-aws-c-io PUBLIC
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/include
)
@@ -25,12 +47,12 @@ target_sources(restricted-aws-aws-c-io PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/channel_bootstrap.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/event_loop.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/exponential_backoff_retry_strategy.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/file_utils_shared.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/host_resolver.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/io.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/message_pool.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/pem_utils.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/pkcs11.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/pki_utils.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/posix/file_utils.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/posix/host_resolver.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/posix/pipe.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/posix/shared_library.c
@@ -38,6 +60,7 @@ target_sources(restricted-aws-aws-c-io PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/retry_strategy.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/s2n/s2n_tls_channel_handler.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/socket_channel_handler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/standard_retry_strategy.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/statistics.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/stream.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/tls_channel_handler.c
diff --git a/contrib/restricted/aws/aws-c-io/CMakeLists.linux.txt b/contrib/restricted/aws/aws-c-io/CMakeLists.linux.txt
index 3dbd8ff9af..7290f20d3a 100644
--- a/contrib/restricted/aws/aws-c-io/CMakeLists.linux.txt
+++ b/contrib/restricted/aws/aws-c-io/CMakeLists.linux.txt
@@ -8,6 +8,28 @@
add_library(restricted-aws-aws-c-io)
+target_compile_options(restricted-aws-aws-c-io PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_ADX
+ -DS2N_BIKE_R3_AVX2
+ -DS2N_BIKE_R3_AVX512
+ -DS2N_BIKE_R3_PCLMUL
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_HAVE_EXECINFO
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_SIKE_P434_R3_ASM
+ -DS2N___RESTRICT__SUPPORTED
+)
target_include_directories(restricted-aws-aws-c-io PUBLIC
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/include
)
@@ -25,12 +47,12 @@ target_sources(restricted-aws-aws-c-io PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/channel_bootstrap.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/event_loop.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/exponential_backoff_retry_strategy.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/file_utils_shared.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/host_resolver.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/io.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/message_pool.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/pem_utils.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/pkcs11.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/pki_utils.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/posix/file_utils.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/posix/host_resolver.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/posix/pipe.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/posix/shared_library.c
@@ -38,6 +60,7 @@ target_sources(restricted-aws-aws-c-io PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/retry_strategy.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/s2n/s2n_tls_channel_handler.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/socket_channel_handler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/standard_retry_strategy.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/statistics.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/stream.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-io/source/tls_channel_handler.c
diff --git a/contrib/restricted/aws/aws-c-io/CONTRIBUTING.md b/contrib/restricted/aws/aws-c-io/CONTRIBUTING.md
index f0c7bd1031..70a884a2b4 100644
--- a/contrib/restricted/aws/aws-c-io/CONTRIBUTING.md
+++ b/contrib/restricted/aws/aws-c-io/CONTRIBUTING.md
@@ -1,9 +1,9 @@
# Contributing Guidelines
-Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
documentation, we greatly value feedback and contributions from our community.
-Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
information to effectively respond to your bug report or contribution.
@@ -11,7 +11,7 @@ information to effectively respond to your bug report or contribution.
We welcome you to use the GitHub issue tracker to report bugs or suggest features.
-When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-io/issues), or [recently closed](https://github.com/awslabs/aws-c-io/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already
+When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-io/issues), or [recently closed](https://github.com/awslabs/aws-c-io/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already
reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
* A reproducible test case or series of steps
@@ -23,7 +23,7 @@ reported the issue. Please try to include as much information as you can. Detail
## Contributing via Pull Requests
Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
-1. You are working against the latest source on the *master* branch.
+1. You are working against the latest source on the *main* branch.
2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
@@ -36,17 +36,17 @@ To send us a pull request, please:
5. Send us a pull request, answering any default questions in the pull request interface.
6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
-GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
## Finding contributions to work on
-Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels ((enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-io/labels/help%20wanted) issues is a great place to start.
+Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-io/labels/help%20wanted) issues is a great place to start.
## Code of Conduct
-This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
-For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
opensource-codeofconduct@amazon.com with any additional questions or comments.
@@ -56,6 +56,6 @@ If you discover a potential security issue in this project we ask that you notif
## Licensing
-See the [LICENSE](https://github.com/awslabs/aws-c-io/blob/master/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
+See the [LICENSE](https://github.com/awslabs/aws-c-io/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
diff --git a/contrib/restricted/aws/aws-c-io/PKCS11.md b/contrib/restricted/aws/aws-c-io/PKCS11.md
new file mode 100644
index 0000000000..b1b9e0c988
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-io/PKCS11.md
@@ -0,0 +1,55 @@
+# PKCS#11 tests
+
+To run the PKCS#11 tests, configure cmake with: `-DENABLE_PKCS11_TESTS=ON`
+
+and set the following environment variables:
+
+```
+TEST_PKCS11_LIB = <path-to-shared-lib>
+TEST_PKCS11_TOKEN_DIR = <path-to-softhsm-token-dir>
+```
+TEST_PKCS11_LIB is used by the tests to perform PKCS#11 operations.
+
+TEST_PKCS11_TOKEN_DIR is used by the tests to clear the softhsm tokens before a test begins. This is achieved by cleaning the token directory. <b>NOTE: Any tokens created outside the tests will be cleaned up, along with all the objects/keys on them, as part of the tests.</b>
+
+
+## The suggested way to set up your machine
+1) Install [SoftHSM2](https://www.opendnssec.org/softhsm/) via brew / apt / apt-get / yum:
+ ```
+ > apt install softhsm
+ ```
+
+ Check that it's working:
+ ```
+ > softhsm2-util --show-slots
+ ```
+
+ If this spits out an error message, create a config file:
+ * Default location: `~/.config/softhsm2/softhsm2.conf`
+ * This file must specify the token dir; the default value is:
+ ```
+ directories.tokendir = /usr/local/var/lib/softhsm/tokens/
+ ```
+
+2) Set env vars like so:
+ ```
+ TEST_PKCS11_LIB = <path to libsofthsm2.so>
+ TEST_PKCS11_TOKEN_DIR = /usr/local/var/lib/softhsm/tokens/
+ ```
+
+
+3) [Example for importing your own keys; not used by the tests] Create a token and private key
+
+ You can use any values for the labels, pin, key, cert, CA etc.
+ Here are copy-paste friendly commands for using files available in this repo.
+ ```
+ > softhsm2-util --init-token --free --label my-test-token --pin 0000 --so-pin 0000
+ ```
+
+ Note which slot the token ended up in.
+
+ ```
+ > softhsm2-util --import tests/resources/unittests.p8 --slot <slot-with-token> --label my-test-key --id BEEFCAFE --pin 0000
+ ```
+ <b>WARN: All tokens created outside the tests will be cleaned up as part of the tests. Use a separate token directory for running the tests if you would like to keep your tokens intact.</b>
+
diff --git a/contrib/restricted/aws/aws-c-io/README.md b/contrib/restricted/aws/aws-c-io/README.md
index 5f24ba57a8..d6b2a97695 100644
--- a/contrib/restricted/aws/aws-c-io/README.md
+++ b/contrib/restricted/aws/aws-c-io/README.md
@@ -18,54 +18,37 @@ This library is licensed under the Apache 2.0 License.
### Building
-#### Building s2n (Linux Only)
+CMake 3.1+ is required to build.
-If you are building on Linux, you will need to build s2n before being able to build aws-c-io. For our CRT's, we build s2n at a specific commit, and recommend doing the same when using it with this library. That commit hash can be found [here](https://github.com/awslabs/aws-crt-cpp/tree/master/aws-common-runtime). The commands below will build s2n using OpenSSL 1.1.1. For using other versions of OpenSSL, there is additional information in the [s2n Usage Guide](https://github.com/awslabs/s2n/blob/master/docs/USAGE-GUIDE.md).
+`<install-path>` must be an absolute path in the following instructions.
+
+#### Linux-Only Dependencies
+
+If you are building on Linux, you will need to build aws-lc and s2n-tls first.
```
-git clone git@github.com:awslabs/s2n.git
-cd s2n
-git checkout <s2n-commit-hash-used-by-aws-crt-cpp>
-
-# We keep the build artifacts in the -build directory
-cd libcrypto-build
-
-# Download the latest version of OpenSSL
-curl -LO https://www.openssl.org/source/openssl-1.1.1-latest.tar.gz
-tar -xzvf openssl-1.1.1-latest.tar.gz
-
-# Build openssl libcrypto. Note that the install path specified here must be absolute.
-cd `tar ztf openssl-1.1.1-latest.tar.gz | head -n1 | cut -f1 -d/`
-./config -fPIC no-shared \
- no-md2 no-rc5 no-rfc3779 no-sctp no-ssl-trace no-zlib \
- no-hw no-mdc2 no-seed no-idea enable-ec_nistp_64_gcc_128 no-camellia\
- no-bf no-ripemd no-dsa no-ssl2 no-ssl3 no-capieng \
- -DSSL_FORBID_ENULL -DOPENSSL_NO_DTLS1 -DOPENSSL_NO_HEARTBEATS \
- --prefix=<absolute-install-path>
-make
-make install
-
-# Build s2n
-cd ../../../
-cmake -DCMAKE_PREFIX_PATH=<install-path> -DCMAKE_INSTALL_PREFIX=<install-path> -S s2n -B s2n/build
-cmake --build s2n/build --target install
+git clone git@github.com:awslabs/aws-lc.git
+cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-lc/build --target install
+
+git clone git@github.com:aws/s2n-tls.git
+cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build s2n-tls/build --target install
```
#### Building aws-c-io and Remaining Dependencies
-Note that aws-c-io has a dependency on aws-c-common so it must be built first:
-
```
git clone git@github.com:awslabs/aws-c-common.git
-cmake -DCMAKE_PREFIX_PATH=<install-path> -DCMAKE_INSTALL_PREFIX=<install-path> -S aws-c-common -B aws-c-common/build
+cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX=<install-path>
cmake --build aws-c-common/build --target install
git clone git@github.com:awslabs/aws-c-cal.git
-cmake -DCMAKE_PREFIX_PATH=<install-path> -DCMAKE_INSTALL_PREFIX=<install-path> -S aws-c-cal -B aws-c-cal/build
+cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
cmake --build aws-c-cal/build --target install
git clone git@github.com:awslabs/aws-c-io.git
-cmake -DCMAKE_PREFIX_PATH=<install-path> -DCMAKE_INSTALL_PREFIX=<install-path> -S aws-c-io -B aws-c-io/build
+cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
cmake --build aws-c-io/build --target install
```
@@ -230,7 +213,10 @@ BSD Variants | s2n
Apple Devices | Security Framework/ Secure Transport. See https://developer.apple.com/documentation/security/secure_transport
Windows | Secure Channel. See https://msdn.microsoft.com/en-us/library/windows/desktop/aa380123(v=vs.85).aspx
-In addition, you can always write your own handler around your favorite implementation and use that.
+In addition, you can always write your own handler around your favorite implementation and use that. To provide your own
+TLS implementation, you must build this library with the cmake argument `-DBYO_CRYPTO=ON`. Once you do this, you will no
+longer need s2n or libcrypto. Instead, your application provides implementations of `aws_tls_ctx` and `aws_channel_handler`.
+At startup time, you must invoke `aws_tls_byo_crypto_set_client_setup_options()` and `aws_tls_byo_crypto_set_server_setup_options()`.
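+
+A minimal sketch of that wiring (the setup-options struct and field names here are assumptions for illustration,
+not a verbatim copy of the header; consult `aws/io/tls_channel_handler.h` for the real types):
+
+```c
+/* at application startup, before any TLS channels are created */
+struct aws_tls_byo_crypto_setup_options client_options = {
+    .new_handler_fn = s_new_client_tls_handler, /* your factory returning an aws_channel_handler */
+    .user_data = my_crypto_state,
+};
+aws_tls_byo_crypto_set_client_setup_options(&client_options);
+```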
### Typical Channel
![Typical Channel Diagram](docs/images/typical_channel.png)
diff --git a/contrib/restricted/aws/aws-c-io/THIRD-PARTY-LICENSES.txt b/contrib/restricted/aws/aws-c-io/THIRD-PARTY-LICENSES.txt
new file mode 100644
index 0000000000..ae301c689c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-io/THIRD-PARTY-LICENSES.txt
@@ -0,0 +1,31 @@
+** PKCS#11 Headers; version 2.40 -- http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/errata01/os/include/pkcs11-v2.40/
+
+Copyright (c) OASIS Open 2016. All Rights Reserved.
+
+All capitalized terms in the following text have the meanings assigned to them
+in the OASIS Intellectual Property Rights Policy (the "OASIS IPR Policy"). The
+full Policy may be found at the OASIS website:
+[http://www.oasis-open.org/policies-guidelines/ipr]
+
+This document and translations of it may be copied and furnished to others, and
+derivative works that comment on or otherwise explain it or assist in its
+implementation may be prepared, copied, published, and distributed, in whole or
+in part, without restriction of any kind, provided that the above copyright
+notice and this section are included on all such copies and derivative works.
+However, this document itself may not be modified in any way, including by
+removing the copyright notice or references to OASIS, except as needed for the
+purpose of developing any document or deliverable produced by an OASIS
+Technical Committee (in which case the rules applicable to copyrights, as set
+forth in the OASIS IPR Policy, must be followed) or as required to translate it
+into languages other than English.
+
+The limited permissions granted above are perpetual and will not be revoked by
+OASIS or its successors or assigns.
+
+This document and the information contained herein is provided on an "AS IS"
+basis and OASIS DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT
+LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION HEREIN WILL NOT
+INFRINGE ANY OWNERSHIP RIGHTS OR ANY IMPLIED WARRANTIES OF MERCHANTABILITY OR
+FITNESS FOR A PARTICULAR PURPOSE. OASIS AND ITS MEMBERS WILL NOT BE LIABLE FOR
+ANY DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE
+OF THIS DOCUMENT OR ANY PART THEREOF.
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/channel.h b/contrib/restricted/aws/aws-c-io/include/aws/io/channel.h
index 2e8f13b915..59b7f0d686 100644
--- a/contrib/restricted/aws/aws-c-io/include/aws/io/channel.h
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/channel.h
@@ -119,6 +119,12 @@ struct aws_channel_handler_vtable {
* associated with the channel's handler chain.
*/
void (*gather_statistics)(struct aws_channel_handler *handler, struct aws_array_list *stats_list);
+
+ /*
+ * If this handler represents a source of data (like the socket_handler), then this will trigger a read
+ * from the data source.
+ */
+ void (*trigger_read)(struct aws_channel_handler *handler);
};
struct aws_channel_handler {
@@ -467,6 +473,15 @@ size_t aws_channel_handler_initial_window_size(struct aws_channel_handler *handl
AWS_IO_API
struct aws_channel_slot *aws_channel_get_first_slot(struct aws_channel *channel);
+/**
+ * A way for external processes to force a read by the data-source channel handler. Necessary in certain cases: for
+ * example, by the time a server channel finishes setting up its initial handlers, a read may already have been
+ * triggered on the socket (the client's CLIENT_HELLO TLS payload, for instance), and absent further
+ * data/notifications, that data would never get processed.
+ */
+AWS_IO_API
+int aws_channel_trigger_read(struct aws_channel *channel);
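+
+/**
+ * Usage sketch: in a server bootstrap's setup path, once all handlers are installed,
+ * force a read so that bytes which arrived during setup get processed:
+ *
+ *     if (aws_channel_trigger_read(channel)) {
+ *         return AWS_OP_ERR; (aws_last_error() has the details)
+ *     }
+ */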
+
AWS_EXTERN_C_END
#endif /* AWS_IO_CHANNEL_H */
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/channel_bootstrap.h b/contrib/restricted/aws/aws-c-io/include/aws/io/channel_bootstrap.h
index 7e32ca8073..ac1405b723 100644
--- a/contrib/restricted/aws/aws-c-io/include/aws/io/channel_bootstrap.h
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/channel_bootstrap.h
@@ -163,6 +163,11 @@ struct aws_server_bootstrap {
* setup_callback - callback invoked once the channel is ready for use and TLS has been negotiated or if an error
* is encountered
* shutdown_callback - callback invoked once the channel has shutdown.
+ * enable_read_back_pressure - controls whether or not back pressure will be applied in the channel
+ * user_data - arbitrary data to pass back to the various callbacks
+ * requested_event_loop - if set, the connection will be placed on the requested event loop rather than one
+ * chosen internally from the bootstrap's associated event loop group. It is an error to pass in an event loop
+ * that is not associated with the bootstrap's event loop group.
*
* Immediately after the `shutdown_callback` returns, the channel is cleaned up automatically. All callbacks are invoked
* in the thread of the event-loop that the new channel is assigned to.
@@ -179,6 +184,7 @@ struct aws_socket_channel_bootstrap_options {
aws_client_bootstrap_on_channel_event_fn *shutdown_callback;
bool enable_read_back_pressure;
void *user_data;
+ struct aws_event_loop *requested_event_loop;
};
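+
+/* Usage sketch: the fields documented above, with connection-specific fields
+ * (host name, port, socket/TLS options, etc.) elided; the callbacks are hypothetical:
+ *
+ *     struct aws_socket_channel_bootstrap_options options = {
+ *         .setup_callback = s_on_setup,
+ *         .shutdown_callback = s_on_shutdown,
+ *         .enable_read_back_pressure = false,
+ *         .user_data = my_state,
+ *         .requested_event_loop = NULL, (NULL lets the bootstrap choose from its group)
+ *     };
+ */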
/**
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/event_loop.h b/contrib/restricted/aws/aws-c-io/include/aws/io/event_loop.h
index 5687f91c92..32d6268697 100644
--- a/contrib/restricted/aws/aws-c-io/include/aws/io/event_loop.h
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/event_loop.h
@@ -9,6 +9,7 @@
#include <aws/common/atomics.h>
#include <aws/common/hash_table.h>
#include <aws/common/ref_count.h>
+
#include <aws/io/io.h>
enum aws_io_event_type {
@@ -21,9 +22,9 @@ enum aws_io_event_type {
struct aws_event_loop;
struct aws_task;
+struct aws_thread_options;
#if AWS_USE_IO_COMPLETION_PORTS
-# include <Windows.h>
struct aws_overlapped;
@@ -34,6 +35,26 @@ typedef void(aws_event_loop_on_completion_fn)(
size_t num_bytes_transferred);
/**
+ * The aws_win32_OVERLAPPED struct is layout-compatible with OVERLAPPED as defined in <Windows.h>. It is used
+ * here to avoid pulling in a dependency on <Windows.h> which would also bring along a lot of bad macros, such
+ * as redefinitions of GetMessage and GetObject. Note that the OVERLAPPED struct layout in the Windows SDK can
+ * never be altered without breaking binary compatibility for every existing third-party executable, so there
+ * is no need to worry about keeping this definition in sync.
+ */
+struct aws_win32_OVERLAPPED {
+ uintptr_t Internal;
+ uintptr_t InternalHigh;
+ union {
+ struct {
+ uint32_t Offset;
+ uint32_t OffsetHigh;
+ } s;
+ void *Pointer;
+ } u;
+ void *hEvent;
+};
+
+/**
* Use aws_overlapped when a handle connected to the event loop needs an OVERLAPPED struct.
* OVERLAPPED structs are needed to make OS-level async I/O calls.
* When the I/O completes, the assigned aws_event_loop_on_completion_fn is called from the event_loop's thread.
@@ -42,7 +63,7 @@ typedef void(aws_event_loop_on_completion_fn)(
* aws_overlapped_reset() or aws_overlapped_init() between uses.
*/
struct aws_overlapped {
- OVERLAPPED overlapped;
+ struct aws_win32_OVERLAPPED overlapped;
aws_event_loop_on_completion_fn *on_completion;
void *user_data;
};
@@ -85,6 +106,10 @@ struct aws_event_loop {
struct aws_allocator *alloc;
aws_io_clock_fn *clock;
struct aws_hash_table local_data;
+ struct aws_atomic_var current_load_factor;
+ uint64_t latest_tick_start;
+ size_t current_tick_latency_sum;
+ struct aws_atomic_var next_flush_time;
void *impl_data;
};
@@ -97,13 +122,19 @@ struct aws_event_loop_local_object {
aws_event_loop_on_local_object_removed_fn *on_object_removed;
};
-typedef struct aws_event_loop *(
- aws_new_event_loop_fn)(struct aws_allocator *alloc, aws_io_clock_fn *clock, void *new_loop_user_data);
+struct aws_event_loop_options {
+ aws_io_clock_fn *clock;
+ struct aws_thread_options *thread_options;
+};
+
+typedef struct aws_event_loop *(aws_new_event_loop_fn)(
+ struct aws_allocator *alloc,
+ const struct aws_event_loop_options *options,
+ void *new_loop_user_data);
struct aws_event_loop_group {
struct aws_allocator *allocator;
struct aws_array_list event_loops;
- struct aws_atomic_var current_index;
struct aws_ref_count ref_count;
struct aws_shutdown_callback_options shutdown_options;
};
@@ -126,6 +157,12 @@ void aws_overlapped_init(
*/
AWS_IO_API
void aws_overlapped_reset(struct aws_overlapped *overlapped);
+
+/**
+ * Casts an aws_overlapped pointer for use as a LPOVERLAPPED parameter to Windows API functions
+ */
+AWS_IO_API
+struct _OVERLAPPED *aws_overlapped_to_windows_overlapped(struct aws_overlapped *overlapped);
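+
+/**
+ * Usage sketch (from a Windows-only .c file that includes <Windows.h>): hand an
+ * aws_overlapped to a Win32 async call via the cast helper. Note that a FALSE return
+ * with GetLastError() == ERROR_IO_PENDING still means the read was queued:
+ *
+ *     aws_overlapped_init(&overlapped, s_on_read_done, user_data);
+ *     ReadFile(handle, buffer, capacity, NULL, aws_overlapped_to_windows_overlapped(&overlapped));
+ */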
#endif /* AWS_USE_IO_COMPLETION_PORTS */
/**
@@ -135,6 +172,15 @@ AWS_IO_API
struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, aws_io_clock_fn *clock);
/**
+ * Creates an instance of the default event loop implementation for the current architecture and operating system using
+ * extendable options.
+ */
+AWS_IO_API
+struct aws_event_loop *aws_event_loop_new_default_with_options(
+ struct aws_allocator *alloc,
+ const struct aws_event_loop_options *options);
+
+/**
* Invokes the destroy() fn for the event loop implementation.
* If the event loop is still in a running state, this function will block waiting on the event loop to shutdown.
* If you do not want this function to block, call aws_event_loop_stop() manually first.
@@ -207,6 +253,31 @@ AWS_IO_API
int aws_event_loop_stop(struct aws_event_loop *event_loop);
/**
+ * For event-loop implementations to use for providing metrics info to the base event-loop. This enables the
+ * event-loop load balancer to take into account load when vending another event-loop to a caller.
+ *
+ * Call this function at the beginning of your event-loop tick: after wake-up, but before processing any IO or tasks.
+ */
+AWS_IO_API
+void aws_event_loop_register_tick_start(struct aws_event_loop *event_loop);
+
+/**
+ * For event-loop implementations to use for providing metrics info to the base event-loop. This enables the
+ * event-loop load balancer to take into account load when vending another event-loop to a caller.
+ *
+ * Call this function at the end of your event-loop tick: after processing IO and tasks.
+ */
+AWS_IO_API
+void aws_event_loop_register_tick_end(struct aws_event_loop *event_loop);
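+
+/*
+ * Implementation sketch: an event-loop's run loop brackets each tick with these hooks
+ * so that aws_event_loop_get_load_factor() (below) has data to report;
+ * s_wait_for_events and s_process_events are hypothetical internals:
+ *
+ *     s_wait_for_events(event_loop);                  (sleep until IO or tasks are ready)
+ *     aws_event_loop_register_tick_start(event_loop); (after wake-up, before any work)
+ *     s_process_events(event_loop);                   (run IO callbacks and scheduled tasks)
+ *     aws_event_loop_register_tick_end(event_loop);   (after all work for this tick)
+ */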
+
+/**
+ * Returns the current load factor (however that may be calculated). If the event-loop is not invoking
+ * aws_event_loop_register_tick_start() and aws_event_loop_register_tick_end(), this value will always be 0.
+ */
+AWS_IO_API
+size_t aws_event_loop_get_load_factor(struct aws_event_loop *event_loop);
+
+/**
* Blocks until the event loop stops completely.
* If you want to call aws_event_loop_run() again, you must call this after aws_event_loop_stop().
* It is not safe to call this function from inside the event loop thread.
@@ -327,10 +398,25 @@ struct aws_event_loop_group *aws_event_loop_group_new(
void *new_loop_user_data,
const struct aws_shutdown_callback_options *shutdown_options);
+/** Creates an event loop group, with a clock, the number of loops to manage, and the function to call for creating a
+ * new event loop; it also pins all loops to hw threads on the same cpu_group (e.g. a NUMA node). Note:
+ * if el_count exceeds the number of hw threads in the cpu_group, the excess is ignored, on the assumption that if you
+ * care about NUMA, you don't want hyper-threads doing your IO and you especially don't want IO on a different node.
+ */
+AWS_IO_API
+struct aws_event_loop_group *aws_event_loop_group_new_pinned_to_cpu_group(
+ struct aws_allocator *alloc,
+ aws_io_clock_fn *clock,
+ uint16_t el_count,
+ uint16_t cpu_group,
+ aws_new_event_loop_fn *new_loop_fn,
+ void *new_loop_user_data,
+ const struct aws_shutdown_callback_options *shutdown_options);
+
/**
* Initializes an event loop group with platform defaults. If max_threads == 0, then the
- * loop count will be the number of available processors on the machine. Otherwise, max_threads
- * will be the number of event loops in the group.
+ * loop count will be the number of available processors on the machine / 2 (to exclude hyper-threads).
+ * Otherwise, max_threads will be the number of event loops in the group.
*/
AWS_IO_API
struct aws_event_loop_group *aws_event_loop_group_new_default(
@@ -338,6 +424,22 @@ struct aws_event_loop_group *aws_event_loop_group_new_default(
uint16_t max_threads,
const struct aws_shutdown_callback_options *shutdown_options);
+/** Creates an event loop group with platform-default event loops, pinning all loops to hw threads on the same
+ * cpu_group (e.g. a NUMA node). Note:
+ * if max_threads exceeds the number of hw threads in the cpu_group, it will be clamped to the number of hw threads,
+ * on the assumption that if you care about NUMA, you don't want hyper-threads doing your IO and you especially
+ * don't want IO on a different node.
+ *
+ * If max_threads == 0, the loop count will be the number of available processors in the cpu_group / 2
+ * (to exclude hyper-threads).
+ */
+AWS_IO_API
+struct aws_event_loop_group *aws_event_loop_group_new_default_pinned_to_cpu_group(
+ struct aws_allocator *alloc,
+ uint16_t max_threads,
+ uint16_t cpu_group,
+ const struct aws_shutdown_callback_options *shutdown_options);
+
/**
* Increments the reference count on the event loop group, allowing the caller to take a reference to it.
*
@@ -361,8 +463,8 @@ size_t aws_event_loop_group_get_loop_count(struct aws_event_loop_group *el_group
/**
* Fetches the next loop for use. The purpose is to enable load balancing across loops. You should not depend on how
- * this load balancing is done as it is subject to change in the future. Currently it just returns them round-robin
- * style.
+ * this load balancing is done as it is subject to change in the future. Currently it uses the "best-of-two" algorithm
+ * based on the load factor of each loop.
*/
AWS_IO_API
struct aws_event_loop *aws_event_loop_group_get_next_loop(struct aws_event_loop_group *el_group);
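+
+/*
+ * Illustration only (the real selection is internal and subject to change): "best-of-two"
+ * samples two loops at random and returns whichever reports the lower load factor:
+ *
+ *     a = aws_event_loop_group_get_loop_at(group, random_index_1);
+ *     b = aws_event_loop_group_get_loop_at(group, random_index_2);
+ *     return aws_event_loop_get_load_factor(a) <= aws_event_loop_get_load_factor(b) ? a : b;
+ */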
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/file_utils.h b/contrib/restricted/aws/aws-c-io/include/aws/io/file_utils.h
index 0fc4e54ae7..81cf387bd6 100644
--- a/contrib/restricted/aws/aws-c-io/include/aws/io/file_utils.h
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/file_utils.h
@@ -6,67 +6,7 @@
* SPDX-License-Identifier: Apache-2.0.
*/
-#include <aws/io/io.h>
-
-AWS_EXTERN_C_BEGIN
-
-/**
- * Reads 'filename' into 'out_buf'. If successful, 'out_buf' is allocated and filled with the data;
- * It is your responsibility to call 'aws_byte_buf_clean_up()' on it. Otherwise, 'out_buf' remains
- * unused. In the very unfortunate case where some API needs to treat out_buf as a c_string, a null terminator
- * is appended, but is not included as part of the length field.
- */
-AWS_IO_API int aws_byte_buf_init_from_file(
- struct aws_byte_buf *out_buf,
- struct aws_allocator *alloc,
- const char *filename);
-
-/**
- * Returns true iff the character is a directory separator on ANY supported platform.
- */
-AWS_IO_API
-bool aws_is_any_directory_separator(char value);
-
-/**
- * Returns the directory separator used by the local platform
- */
-AWS_IO_API
-char aws_get_platform_directory_separator(void);
-
-/**
- * Returns the current user's home directory.
- */
-AWS_IO_API
-struct aws_string *aws_get_home_directory(struct aws_allocator *allocator);
-
-/**
- * Returns true if a file or path exists, otherwise, false.
- */
-AWS_IO_API
-bool aws_path_exists(const char *path);
-
-/*
- * Wrapper for highest-resolution platform-dependent seek implementation.
- * Maps to:
- *
- * _fseeki64() on windows
- * fseeko() on linux
- *
- * whence can either be SEEK_SET or SEEK_END
- */
-AWS_IO_API
-int aws_fseek(FILE *file, aws_off_t offset, int whence);
-
-/*
- * Wrapper for os-specific file length query. We can't use fseek(END, 0)
- * because support for it is not technically required.
- *
- * Unix flavors call fstat, while Windows variants use GetFileSize on a
- * HANDLE queried from the libc FILE pointer.
- */
-AWS_IO_API
-int aws_file_get_length(FILE *file, int64_t *length);
-
-AWS_EXTERN_C_END
+/* Just shim to the code that moved into aws-c-common, maintaining the public interface */
+#include <aws/common/file.h>
#endif /* AWS_IO_FILE_UTILS_H */
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/host_resolver.h b/contrib/restricted/aws/aws-c-io/include/aws/io/host_resolver.h
index 2fb8ff7c94..a9950cad75 100644
--- a/contrib/restricted/aws/aws-c-io/include/aws/io/host_resolver.h
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/host_resolver.h
@@ -116,6 +116,13 @@ struct aws_host_resolver {
struct aws_shutdown_callback_options shutdown_options;
};
+struct aws_host_resolver_default_options {
+ size_t max_entries;
+ struct aws_event_loop_group *el_group;
+ const struct aws_shutdown_callback_options *shutdown_options;
+ aws_io_clock_fn *system_clock_override_fn;
+};
+
AWS_EXTERN_C_BEGIN
/**
@@ -176,9 +183,7 @@ AWS_IO_API int aws_default_dns_resolve(
*/
AWS_IO_API struct aws_host_resolver *aws_host_resolver_new_default(
struct aws_allocator *allocator,
- size_t max_entries,
- struct aws_event_loop_group *el_group,
- const struct aws_shutdown_callback_options *shutdown_options);
+ struct aws_host_resolver_default_options *options);
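+
+/*
+ * Usage sketch: the old positional arguments now travel in the options struct;
+ * fields left zeroed (e.g. system_clock_override_fn) keep their defaults:
+ *
+ *     struct aws_host_resolver_default_options options = {
+ *         .max_entries = 16,
+ *         .el_group = el_group,
+ *     };
+ *     struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &options);
+ */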
/**
* Increments the reference count on the host resolver, allowing the caller to take a reference to it.
@@ -238,6 +243,22 @@ typedef void(aws_host_listener_resolved_address_fn)(
/* User data that was specified when adding the listener. */
void *user_data);
+/* Callback for learning of an expired address from a listener. Memory for the expired address list is only guaranteed
+ * to exist during the callback, and must be copied if the caller needs it to persist after. */
+typedef void(aws_host_listener_expired_address_fn)(
+ /* Listener that owns this callback. */
+ struct aws_host_listener *listener,
+
+ /* Array list of aws_host_address structures. To get an item:
+ *
+ * struct aws_host_address *host_address = NULL;
+ * aws_array_list_get_at_ptr(new_address_list, (void **)&host_address, address_index);
+ * */
+ const struct aws_array_list *expired_address_list,
+
+ /* User data that was specified when adding the listener. */
+ void *user_data);
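+
+/* Implementation sketch for this callback: copy out anything needed after the callback
+ * returns, since the list memory is only valid during the call (aws_host_address_copy
+ * performs the deep copy; the destination is the caller's responsibility):
+ *
+ *     size_t count = aws_array_list_length(expired_address_list);
+ *     for (size_t i = 0; i < count; ++i) {
+ *         struct aws_host_address *address = NULL;
+ *         aws_array_list_get_at_ptr(expired_address_list, (void **)&address, i);
+ *         aws_host_address_copy(address, destination);
+ *     }
+ */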
+
/* Callback for when the listener has completed its clean up. */
typedef void(aws_host_listener_shutdown_fn)(void *user_data);
@@ -249,11 +270,17 @@ struct aws_host_listener_options {
/* Callback for when an address is resolved for the specified host. */
aws_host_listener_resolved_address_fn *resolved_address_callback;
+ /* Callback for when a resolved address expires for the specified host. */
+ aws_host_listener_expired_address_fn *expired_address_callback;
+
/* Callback for when a listener has completely shutdown. */
aws_host_listener_shutdown_fn *shutdown_callback;
/* User data to be passed into each callback. */
void *user_data;
+
+ /* Lets the resolver know to keep the resolution thread alive for as long as this listener is attached */
+ bool pin_host_entry;
};
/* Create and add a listener to the host resolver using the specified options. */
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/io.h b/contrib/restricted/aws/aws-c-io/include/aws/io/io.h
index e6f06aecdc..9dc5039389 100644
--- a/contrib/restricted/aws/aws-c-io/include/aws/io/io.h
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/io.h
@@ -127,15 +127,123 @@ enum aws_io_errors {
AWS_IO_DNS_HOST_REMOVED_FROM_CACHE,
AWS_IO_STREAM_INVALID_SEEK_POSITION,
AWS_IO_STREAM_READ_FAILED,
- AWS_IO_INVALID_FILE_HANDLE,
+ DEPRECATED_AWS_IO_INVALID_FILE_HANDLE,
AWS_IO_SHARED_LIBRARY_LOAD_FAILURE,
AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE,
AWS_IO_TLS_NEGOTIATION_TIMEOUT,
AWS_IO_TLS_ALERT_NOT_GRACEFUL,
AWS_IO_MAX_RETRIES_EXCEEDED,
AWS_IO_RETRY_PERMISSION_DENIED,
+ AWS_IO_TLS_DIGEST_ALGORITHM_UNSUPPORTED,
+ AWS_IO_TLS_SIGNATURE_ALGORITHM_UNSUPPORTED,
- AWS_IO_ERROR_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_IO_PACKAGE_ID)
+ AWS_ERROR_PKCS11_VERSION_UNSUPPORTED,
+ AWS_ERROR_PKCS11_TOKEN_NOT_FOUND,
+ AWS_ERROR_PKCS11_KEY_NOT_FOUND,
+ AWS_ERROR_PKCS11_KEY_TYPE_UNSUPPORTED,
+ AWS_ERROR_PKCS11_UNKNOWN_CRYPTOKI_RETURN_VALUE,
+
+ /* PKCS#11 "CKR_" (Cryptoki Return Value) as AWS error-codes */
+ AWS_ERROR_PKCS11_CKR_CANCEL,
+ AWS_ERROR_PKCS11_CKR_HOST_MEMORY,
+ AWS_ERROR_PKCS11_CKR_SLOT_ID_INVALID,
+ AWS_ERROR_PKCS11_CKR_GENERAL_ERROR,
+ AWS_ERROR_PKCS11_CKR_FUNCTION_FAILED,
+ AWS_ERROR_PKCS11_CKR_ARGUMENTS_BAD,
+ AWS_ERROR_PKCS11_CKR_NO_EVENT,
+ AWS_ERROR_PKCS11_CKR_NEED_TO_CREATE_THREADS,
+ AWS_ERROR_PKCS11_CKR_CANT_LOCK,
+ AWS_ERROR_PKCS11_CKR_ATTRIBUTE_READ_ONLY,
+ AWS_ERROR_PKCS11_CKR_ATTRIBUTE_SENSITIVE,
+ AWS_ERROR_PKCS11_CKR_ATTRIBUTE_TYPE_INVALID,
+ AWS_ERROR_PKCS11_CKR_ATTRIBUTE_VALUE_INVALID,
+ AWS_ERROR_PKCS11_CKR_ACTION_PROHIBITED,
+ AWS_ERROR_PKCS11_CKR_DATA_INVALID,
+ AWS_ERROR_PKCS11_CKR_DATA_LEN_RANGE,
+ AWS_ERROR_PKCS11_CKR_DEVICE_ERROR,
+ AWS_ERROR_PKCS11_CKR_DEVICE_MEMORY,
+ AWS_ERROR_PKCS11_CKR_DEVICE_REMOVED,
+ AWS_ERROR_PKCS11_CKR_ENCRYPTED_DATA_INVALID,
+ AWS_ERROR_PKCS11_CKR_ENCRYPTED_DATA_LEN_RANGE,
+ AWS_ERROR_PKCS11_CKR_FUNCTION_CANCELED,
+ AWS_ERROR_PKCS11_CKR_FUNCTION_NOT_PARALLEL,
+ AWS_ERROR_PKCS11_CKR_FUNCTION_NOT_SUPPORTED,
+ AWS_ERROR_PKCS11_CKR_KEY_HANDLE_INVALID,
+ AWS_ERROR_PKCS11_CKR_KEY_SIZE_RANGE,
+ AWS_ERROR_PKCS11_CKR_KEY_TYPE_INCONSISTENT,
+ AWS_ERROR_PKCS11_CKR_KEY_NOT_NEEDED,
+ AWS_ERROR_PKCS11_CKR_KEY_CHANGED,
+ AWS_ERROR_PKCS11_CKR_KEY_NEEDED,
+ AWS_ERROR_PKCS11_CKR_KEY_INDIGESTIBLE,
+ AWS_ERROR_PKCS11_CKR_KEY_FUNCTION_NOT_PERMITTED,
+ AWS_ERROR_PKCS11_CKR_KEY_NOT_WRAPPABLE,
+ AWS_ERROR_PKCS11_CKR_KEY_UNEXTRACTABLE,
+ AWS_ERROR_PKCS11_CKR_MECHANISM_INVALID,
+ AWS_ERROR_PKCS11_CKR_MECHANISM_PARAM_INVALID,
+ AWS_ERROR_PKCS11_CKR_OBJECT_HANDLE_INVALID,
+ AWS_ERROR_PKCS11_CKR_OPERATION_ACTIVE,
+ AWS_ERROR_PKCS11_CKR_OPERATION_NOT_INITIALIZED,
+ AWS_ERROR_PKCS11_CKR_PIN_INCORRECT,
+ AWS_ERROR_PKCS11_CKR_PIN_INVALID,
+ AWS_ERROR_PKCS11_CKR_PIN_LEN_RANGE,
+ AWS_ERROR_PKCS11_CKR_PIN_EXPIRED,
+ AWS_ERROR_PKCS11_CKR_PIN_LOCKED,
+ AWS_ERROR_PKCS11_CKR_SESSION_CLOSED,
+ AWS_ERROR_PKCS11_CKR_SESSION_COUNT,
+ AWS_ERROR_PKCS11_CKR_SESSION_HANDLE_INVALID,
+ AWS_ERROR_PKCS11_CKR_SESSION_PARALLEL_NOT_SUPPORTED,
+ AWS_ERROR_PKCS11_CKR_SESSION_READ_ONLY,
+ AWS_ERROR_PKCS11_CKR_SESSION_EXISTS,
+ AWS_ERROR_PKCS11_CKR_SESSION_READ_ONLY_EXISTS,
+ AWS_ERROR_PKCS11_CKR_SESSION_READ_WRITE_SO_EXISTS,
+ AWS_ERROR_PKCS11_CKR_SIGNATURE_INVALID,
+ AWS_ERROR_PKCS11_CKR_SIGNATURE_LEN_RANGE,
+ AWS_ERROR_PKCS11_CKR_TEMPLATE_INCOMPLETE,
+ AWS_ERROR_PKCS11_CKR_TEMPLATE_INCONSISTENT,
+ AWS_ERROR_PKCS11_CKR_TOKEN_NOT_PRESENT,
+ AWS_ERROR_PKCS11_CKR_TOKEN_NOT_RECOGNIZED,
+ AWS_ERROR_PKCS11_CKR_TOKEN_WRITE_PROTECTED,
+ AWS_ERROR_PKCS11_CKR_UNWRAPPING_KEY_HANDLE_INVALID,
+ AWS_ERROR_PKCS11_CKR_UNWRAPPING_KEY_SIZE_RANGE,
+ AWS_ERROR_PKCS11_CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT,
+ AWS_ERROR_PKCS11_CKR_USER_ALREADY_LOGGED_IN,
+ AWS_ERROR_PKCS11_CKR_USER_NOT_LOGGED_IN,
+ AWS_ERROR_PKCS11_CKR_USER_PIN_NOT_INITIALIZED,
+ AWS_ERROR_PKCS11_CKR_USER_TYPE_INVALID,
+ AWS_ERROR_PKCS11_CKR_USER_ANOTHER_ALREADY_LOGGED_IN,
+ AWS_ERROR_PKCS11_CKR_USER_TOO_MANY_TYPES,
+ AWS_ERROR_PKCS11_CKR_WRAPPED_KEY_INVALID,
+ AWS_ERROR_PKCS11_CKR_WRAPPED_KEY_LEN_RANGE,
+ AWS_ERROR_PKCS11_CKR_WRAPPING_KEY_HANDLE_INVALID,
+ AWS_ERROR_PKCS11_CKR_WRAPPING_KEY_SIZE_RANGE,
+ AWS_ERROR_PKCS11_CKR_WRAPPING_KEY_TYPE_INCONSISTENT,
+ AWS_ERROR_PKCS11_CKR_RANDOM_SEED_NOT_SUPPORTED,
+ AWS_ERROR_PKCS11_CKR_RANDOM_NO_RNG,
+ AWS_ERROR_PKCS11_CKR_DOMAIN_PARAMS_INVALID,
+ AWS_ERROR_PKCS11_CKR_CURVE_NOT_SUPPORTED,
+ AWS_ERROR_PKCS11_CKR_BUFFER_TOO_SMALL,
+ AWS_ERROR_PKCS11_CKR_SAVED_STATE_INVALID,
+ AWS_ERROR_PKCS11_CKR_INFORMATION_SENSITIVE,
+ AWS_ERROR_PKCS11_CKR_STATE_UNSAVEABLE,
+ AWS_ERROR_PKCS11_CKR_CRYPTOKI_NOT_INITIALIZED,
+ AWS_ERROR_PKCS11_CKR_CRYPTOKI_ALREADY_INITIALIZED,
+ AWS_ERROR_PKCS11_CKR_MUTEX_BAD,
+ AWS_ERROR_PKCS11_CKR_MUTEX_NOT_LOCKED,
+ AWS_ERROR_PKCS11_CKR_NEW_PIN_MODE,
+ AWS_ERROR_PKCS11_CKR_NEXT_OTP,
+ AWS_ERROR_PKCS11_CKR_EXCEEDED_MAX_ITERATIONS,
+ AWS_ERROR_PKCS11_CKR_FIPS_SELF_TEST_FAILED,
+ AWS_ERROR_PKCS11_CKR_LIBRARY_LOAD_FAILED,
+ AWS_ERROR_PKCS11_CKR_PIN_TOO_WEAK,
+ AWS_ERROR_PKCS11_CKR_PUBLIC_KEY_INVALID,
+ AWS_ERROR_PKCS11_CKR_FUNCTION_REJECTED,
+
+ AWS_ERROR_IO_PINNED_EVENT_LOOP_MISMATCH,
+
+ AWS_ERROR_PKCS11_ENCODING_ERROR,
+
+ AWS_IO_ERROR_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_IO_PACKAGE_ID),
+ AWS_IO_INVALID_FILE_HANDLE = AWS_ERROR_INVALID_FILE_HANDLE,
};
AWS_EXTERN_C_BEGIN
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/logging.h b/contrib/restricted/aws/aws-c-io/include/aws/io/logging.h
index d50a4e4a41..a95e1d4acd 100644
--- a/contrib/restricted/aws/aws-c-io/include/aws/io/logging.h
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/logging.h
@@ -28,6 +28,8 @@ enum aws_io_log_subject {
AWS_LS_IO_FILE_UTILS,
AWS_LS_IO_SHARED_LIBRARY,
AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY,
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ AWS_LS_IO_PKCS11,
AWS_IO_LS_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_IO_PACKAGE_ID)
};
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/pkcs11.h b/contrib/restricted/aws/aws-c-io/include/aws/io/pkcs11.h
new file mode 100644
index 0000000000..862f063104
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/pkcs11.h
@@ -0,0 +1,94 @@
+#ifndef AWS_IO_PKCS11_H
+#define AWS_IO_PKCS11_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/io/io.h>
+
+struct aws_allocator;
+
+/**
+ * Handle to a loaded PKCS#11 library.
+ */
+struct aws_pkcs11_lib;
+
+/**
+ * Controls how aws_pkcs11_lib calls C_Initialize() and C_Finalize() on the PKCS#11 library.
+ */
+enum aws_pkcs11_lib_behavior {
+ /**
+ * Default behavior that accommodates most use cases.
+ * C_Initialize() is called on creation, and "already-initialized" errors are ignored.
+ * C_Finalize() is never called, just in case another part of your
+ * application is still using the PKCS#11 library.
+ */
+ AWS_PKCS11_LIB_DEFAULT_BEHAVIOR,
+
+ /**
+ * Skip calling C_Initialize() and C_Finalize().
+ * Use this if your application has already initialized the PKCS#11 library,
+ * and you do not want C_Initialize() called again.
+ */
+ AWS_PKCS11_LIB_OMIT_INITIALIZE,
+
+ /**
+ * C_Initialize() is called on creation and C_Finalize() is called on cleanup.
+     * If C_Initialize() reports that it's already initialized, this is treated as an error.
+ * Use this if you need perfect cleanup (ex: running valgrind with --leak-check).
+ */
+ AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE,
+};
+
+/* The enum above was misspelled, and later got fixed (pcks11 -> pkcs11).
+ * This macro maintains backwards compatibility with the old spelling. */
+#define aws_pcks11_lib_behavior aws_pkcs11_lib_behavior
+
+/**
+ * Options for aws_pkcs11_lib_new()
+ */
+struct aws_pkcs11_lib_options {
+ /**
+ * Name of PKCS#11 library file to load (UTF-8).
+ * Zero out if your application is compiled with PKCS#11 symbols linked in.
+ */
+ struct aws_byte_cursor filename;
+
+ /**
+ * Behavior for calling C_Initialize() and C_Finalize() on the PKCS#11 library.
+ */
+ enum aws_pkcs11_lib_behavior initialize_finalize_behavior;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Load and initialize a PKCS#11 library.
+ * See `aws_pkcs11_lib_options` for options.
+ *
+ * If successful, a valid pointer is returned. You must call aws_pkcs11_lib_release() when you are done with it.
+ * If unsuccessful, NULL is returned and an error is set.
+ */
+AWS_IO_API
+struct aws_pkcs11_lib *aws_pkcs11_lib_new(
+ struct aws_allocator *allocator,
+ const struct aws_pkcs11_lib_options *options);
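+
+/* Illustrative usage sketch (not part of the upstream header). The library
+ * path below is a placeholder; any PKCS#11 shared library works:
+ *
+ *     struct aws_pkcs11_lib_options pkcs11_lib_options = {
+ *         .filename = aws_byte_cursor_from_c_str("/usr/lib/softhsm/libsofthsm2.so"),
+ *         .initialize_finalize_behavior = AWS_PKCS11_LIB_DEFAULT_BEHAVIOR,
+ *     };
+ *     struct aws_pkcs11_lib *pkcs11_lib = aws_pkcs11_lib_new(allocator, &pkcs11_lib_options);
+ *     if (pkcs11_lib == NULL) {
+ *         return aws_last_error();
+ *     }
+ *     ... use the library ...
+ *     aws_pkcs11_lib_release(pkcs11_lib);
+ */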
+
+/**
+ * Acquire a reference to a PKCS#11 library, preventing it from being cleaned up.
+ * You must call aws_pkcs11_lib_release() when you are done with it.
+ * This function returns whatever was passed in. It cannot fail.
+ */
+AWS_IO_API
+struct aws_pkcs11_lib *aws_pkcs11_lib_acquire(struct aws_pkcs11_lib *pkcs11_lib);
+
+/**
+ * Release a reference to the PKCS#11 library.
+ * When the last reference is released, the library is cleaned up.
+ */
+AWS_IO_API
+void aws_pkcs11_lib_release(struct aws_pkcs11_lib *pkcs11_lib);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_IO_PKCS11_H */
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/private/pem_utils.h b/contrib/restricted/aws/aws-c-io/include/aws/io/private/pem_utils.h
new file mode 100644
index 0000000000..d5eb37ad2f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/private/pem_utils.h
@@ -0,0 +1,25 @@
+#ifndef AWS_IO_PEM_UTILS_H
+#define AWS_IO_PEM_UTILS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/io/io.h>
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Cleanup function that parses the full PEM chain once and strips out comments,
+ * since the PEM parser does not handle them. The passed-in pem buffer is sanitized in place.
+ *
+ * - Garbage characters in-between PEM objects (characters before the first BEGIN or after an END and before the next
+ * BEGIN) are removed
+ *
+ * - AWS_ERROR_INVALID_ARGUMENT will be raised if the input contains no PEM encoded data.
+ */
+AWS_IO_API
+int aws_sanitize_pem(struct aws_byte_buf *pem, struct aws_allocator *allocator);
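+
+/* Illustrative usage sketch, assuming `pem` is an aws_byte_buf already filled
+ * with the raw file contents:
+ *
+ *     if (aws_sanitize_pem(&pem, allocator)) {
+ *         return AWS_OP_ERR; // e.g. AWS_ERROR_INVALID_ARGUMENT: no PEM encoded data found
+ *     }
+ */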
+
+AWS_EXTERN_C_END
+#endif /* AWS_IO_PEM_UTILS_H */
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/pki_utils.h b/contrib/restricted/aws/aws-c-io/include/aws/io/private/pki_utils.h
index 4b1be67ffd..8a99038a63 100644
--- a/contrib/restricted/aws/aws-c-io/include/aws/io/pki_utils.h
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/private/pki_utils.h
@@ -6,6 +6,19 @@
*/
#include <aws/io/io.h>
+#ifdef _WIN32
+/* It's ok to include external headers because this is a PRIVATE header file
+ * (it is usually a crime to include windows.h from a header file) */
+# include <Windows.h>
+#endif /* _WIN32 */
+
+#ifdef AWS_OS_APPLE
+/* It's ok to include external headers because this is a PRIVATE header file */
+# include <CoreFoundation/CFArray.h>
+#endif /* AWS_OS_APPLE */
+
+struct aws_string;
+
AWS_EXTERN_C_BEGIN
/**
@@ -37,8 +50,6 @@ AWS_IO_API int aws_read_and_decode_pem_file_to_buffer_list(
struct aws_array_list *cert_chain_or_key);
#ifdef AWS_OS_APPLE
-struct __CFArray;
-typedef const struct __CFArray *CFArrayRef;
# if !defined(AWS_OS_IOS)
/**
* Imports a PEM armored PKCS#7 public/private key pair
@@ -49,7 +60,8 @@ int aws_import_public_and_private_keys_to_identity(
CFAllocatorRef cf_alloc,
const struct aws_byte_cursor *public_cert_chain,
const struct aws_byte_cursor *private_key,
- CFArrayRef *identity);
+ CFArrayRef *identity,
+ const struct aws_string *keychain_path);
# endif /* AWS_OS_IOS */
/**
@@ -85,9 +97,7 @@ void aws_release_certificates(CFArrayRef certs);
#endif /* AWS_OS_APPLE */
#ifdef _WIN32
-typedef void *HCERTSTORE;
-struct _CERT_CONTEXT;
-typedef const struct _CERT_CONTEXT *PCCERT_CONTEXT;
+
/**
* Returns AWS_OP_SUCCESS if we were able to successfully load the certificate and cert_store.
*
@@ -120,8 +130,12 @@ AWS_IO_API int aws_import_key_pair_to_cert_context(
struct aws_allocator *alloc,
const struct aws_byte_cursor *public_cert_chain,
const struct aws_byte_cursor *private_key,
+ bool is_client_mode,
HCERTSTORE *cert_store,
- PCCERT_CONTEXT *certs);
+ PCCERT_CONTEXT *certs,
+ HCRYPTPROV *crypto_provider,
+ HCRYPTKEY *private_key_handle);
+
#endif /* _WIN32 */
AWS_EXTERN_C_END
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/private/tls_channel_handler_shared.h b/contrib/restricted/aws/aws-c-io/include/aws/io/private/tls_channel_handler_shared.h
index cdaccc36e1..71f3aa48bd 100644
--- a/contrib/restricted/aws/aws-c-io/include/aws/io/private/tls_channel_handler_shared.h
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/private/tls_channel_handler_shared.h
@@ -34,6 +34,17 @@ AWS_IO_API void aws_on_tls_negotiation_completed(
struct aws_tls_channel_handler_shared *tls_handler_shared,
int error_code);
+/**
+ * Returns true if an aws_byte_buf on aws_tls_ctx_options was set by the user.
+ * Use this to determine whether a buf was set. DO NOT simply check if buf.len > 0.
+ *
+ * Reasoning:
+ * If the user calls a setter function but passes a 0 length file or cursor, buf.len will be zero.
+ * TLS should still respect the fact that the setter was called.
+ * TLS should not use defaults instead just because length is 0.
+ */
+AWS_IO_API bool aws_tls_options_buf_is_set(const struct aws_byte_buf *buf);
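+
+/* Minimal usage sketch; `certificate` stands in for any aws_byte_buf member of
+ * aws_tls_ctx_options:
+ *
+ *     if (aws_tls_options_buf_is_set(&options->certificate)) {
+ *         // the setter was called, even if the buffer is zero-length
+ *     }
+ */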
+
AWS_EXTERN_C_END
#endif /* AWS_IO_TLS_CHANNEL_HANDLER_SHARED_H */
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/retry_strategy.h b/contrib/restricted/aws/aws-c-io/include/aws/io/retry_strategy.h
index f7540695f9..0c447aa71c 100644
--- a/contrib/restricted/aws/aws-c-io/include/aws/io/retry_strategy.h
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/retry_strategy.h
@@ -16,7 +16,9 @@ struct aws_event_loop_group;
/**
* Invoked upon the acquisition, or failure to acquire a retry token. This function will always be invoked if and only
* if aws_retry_strategy_acquire_retry_token() returns AWS_OP_SUCCESS. It will never be invoked synchronously from
- * aws_retry_strategy_acquire_retry_token(). Token will always be NULL if error_code is non-zero, and vice-versa.
+ * aws_retry_strategy_acquire_retry_token(). Token will always be NULL if error_code is non-zero, and vice-versa. If
+ * token is non-null, it will have a reference count of 1, and you must call aws_retry_token_release() on it later. See
+ * the comments for aws_retry_strategy_on_retry_ready_fn for more info.
*/
typedef void(aws_retry_strategy_on_retry_token_acquired_fn)(
struct aws_retry_strategy *retry_strategy,
@@ -28,8 +30,8 @@ typedef void(aws_retry_strategy_on_retry_token_acquired_fn)(
* Invoked after a successful call to aws_retry_strategy_schedule_retry(). This function will always be invoked if and
* only if aws_retry_strategy_schedule_retry() returns AWS_OP_SUCCESS. It will never be invoked synchronously from
* aws_retry_strategy_schedule_retry(). After attempting the operation, either call aws_retry_strategy_schedule_retry()
- * with an aws_retry_error_type or call aws_retry_strategy_token_record_success() and then release the token via.
- * aws_retry_strategy_release_retry_token().
+ * with an aws_retry_error_type or call aws_retry_token_record_success() and then release the token via
+ * aws_retry_token_release().
*/
typedef void(aws_retry_strategy_on_retry_ready_fn)(struct aws_retry_token *token, int error_code, void *user_data);
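+
+/* Illustrative token lifecycle sketch; `s_do_operation` is a hypothetical
+ * application function:
+ *
+ *     static void s_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) {
+ *         if (error_code == AWS_ERROR_SUCCESS && s_do_operation() == AWS_OP_SUCCESS) {
+ *             aws_retry_token_record_success(token);
+ *             aws_retry_token_release(token);
+ *             return;
+ *         }
+ *         if (aws_retry_strategy_schedule_retry(token, AWS_RETRY_ERROR_TYPE_TRANSIENT, s_on_retry_ready, user_data)) {
+ *             aws_retry_token_release(token); // out of retries, or shutting down
+ *         }
+ *     }
+ */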
@@ -74,6 +76,7 @@ struct aws_retry_strategy {
struct aws_retry_token {
struct aws_allocator *allocator;
struct aws_retry_strategy *retry_strategy;
+ struct aws_atomic_var ref_count;
void *impl;
};
@@ -109,6 +112,12 @@ struct aws_exponential_backoff_retry_options {
uint64_t (*generate_random)(void);
};
+struct aws_standard_retry_options {
+ struct aws_exponential_backoff_retry_options backoff_retry_options;
+ /** capacity for partitions. Defaults to 500 */
+ size_t initial_bucket_capacity;
+};
+
AWS_EXTERN_C_BEGIN
/**
* Acquire a reference count on retry_strategy.
@@ -130,6 +139,7 @@ AWS_IO_API int aws_retry_strategy_acquire_retry_token(
aws_retry_strategy_on_retry_token_acquired_fn *on_acquired,
void *user_data,
uint64_t timeout_ms);
+
/**
* Schedules a retry based on the backoff and token based strategies. retry_ready is invoked when the retry is either
* ready for execution or if it has been canceled due to application shutdown.
@@ -149,12 +159,18 @@ AWS_IO_API int aws_retry_strategy_schedule_retry(
* some strategies such as exponential backoff will ignore this, but you should always call it after a successful
* operation or your system will never recover during an outage.
*/
-AWS_IO_API int aws_retry_strategy_token_record_success(struct aws_retry_token *token);
+AWS_IO_API int aws_retry_token_record_success(struct aws_retry_token *token);
+
+/**
+ * Increments the reference count on the token. This should be called any time you store the token in a pointer you own.
+ */
+AWS_IO_API void aws_retry_token_acquire(struct aws_retry_token *token);
+
/**
* Releases the reference count for token. This should always be invoked after either calling
- * aws_retry_strategy_schedule_retry() and failing, or after calling aws_retry_strategy_token_record_success().
+ * aws_retry_strategy_schedule_retry() and failing, or after calling aws_retry_token_record_success().
*/
-AWS_IO_API void aws_retry_strategy_release_retry_token(struct aws_retry_token *token);
+AWS_IO_API void aws_retry_token_release(struct aws_retry_token *token);
/**
* Creates a retry strategy using exponential backoff. This strategy does not perform any bookkeeping on error types and
* success. There is no circuit breaker functionality in here. See the comments above for
@@ -163,6 +179,35 @@ AWS_IO_API void aws_retry_strategy_release_retry_token(struct aws_retry_token *t
AWS_IO_API struct aws_retry_strategy *aws_retry_strategy_new_exponential_backoff(
struct aws_allocator *allocator,
const struct aws_exponential_backoff_retry_options *config);
+
+/**
+ * This is a retry implementation that cuts off traffic if it's
+ * detected that an endpoint partition is having availability
+ * problems. This is necessary to keep from making outages worse
+ * by scheduling work that's unlikely to succeed yet increases
+ * load on an already ailing system.
+ *
+ * We do this by creating a bucket for each partition. A partition
+ * is an arbitrary specifier. It can be anything: a region, a service,
+ * a combination of region and service, a literal DNS name... it doesn't matter.
+ *
+ * Each bucket has a budget for maximum allowed retries. Different types of events
+ * carry different weights. Things that indicate an unhealthy partition such as
+ * transient errors (timeouts, unhealthy connection, etc.) cost more.
+ * A retry for any other reason (e.g. the service sending a 5xx response code) costs a bit less.
+ * When a retry is attempted this capacity is leased out to the retry. On success it is
+ * released back to the capacity pool. On failure, it remains leased.
+ * Operations that succeed without a retry slowly restore the capacity pool.
+ *
+ * If a partition runs out of capacity it is assumed unhealthy and retries will be blocked
+ * until capacity returns to the pool. To prevent a partition from staying unhealthy after
+ * an outage has recovered, new requests that succeed without a retry will increase the capacity
+ * slowly (a new request gets a payback lease of 1, but the lease is never actually deducted from the capacity pool).
+ */
+AWS_IO_API struct aws_retry_strategy *aws_retry_strategy_new_standard(
+ struct aws_allocator *allocator,
+ const struct aws_standard_retry_options *config);
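+
+/* Illustrative setup sketch; `el_group`, `allocator`, and `s_on_token_acquired`
+ * are assumed to exist, and the partition id "us-east-1" is an arbitrary
+ * specifier per the comment above:
+ *
+ *     struct aws_standard_retry_options retry_options = {
+ *         .backoff_retry_options = {.el_group = el_group, .max_retries = 3},
+ *         .initial_bucket_capacity = 500,
+ *     };
+ *     struct aws_retry_strategy *strategy = aws_retry_strategy_new_standard(allocator, &retry_options);
+ *     struct aws_byte_cursor partition_id = aws_byte_cursor_from_c_str("us-east-1");
+ *     aws_retry_strategy_acquire_retry_token(strategy, &partition_id, s_on_token_acquired, NULL, 100);
+ */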
+
AWS_EXTERN_C_END
#endif /* AWS_IO_CLIENT_RETRY_STRATEGY_H */
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/stream.h b/contrib/restricted/aws/aws-c-io/include/aws/io/stream.h
index 70ac188de9..ff86de970a 100644
--- a/contrib/restricted/aws/aws-c-io/include/aws/io/stream.h
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/stream.h
@@ -6,6 +6,7 @@
* SPDX-License-Identifier: Apache-2.0.
*/
+#include <aws/common/ref_count.h>
#include <aws/io/io.h>
struct aws_input_stream;
@@ -24,7 +25,7 @@ struct aws_stream_status {
};
struct aws_input_stream_vtable {
- int (*seek)(struct aws_input_stream *stream, aws_off_t offset, enum aws_stream_seek_basis basis);
+ int (*seek)(struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis);
/**
* Stream as much data as will fit into the destination buffer and update its length.
* The destination buffer's capacity MUST NOT be changed.
@@ -38,24 +39,50 @@ struct aws_input_stream_vtable {
int (*read)(struct aws_input_stream *stream, struct aws_byte_buf *dest);
int (*get_status)(struct aws_input_stream *stream, struct aws_stream_status *status);
int (*get_length)(struct aws_input_stream *stream, int64_t *out_length);
- void (*destroy)(struct aws_input_stream *stream);
+
+ /**
+     * Optional.
+     * If not set, the default aws_ref_count_acquire/release will be used.
+     * Set these for a high-level language binding that has its own refcounting implementation and needs to keep the
+     * stream alive from C.
+     * If set, the ref_count member will not be used.
+ */
+ void (*acquire)(struct aws_input_stream *stream);
+ void (*release)(struct aws_input_stream *stream);
};
+/**
+ * Base class for input streams.
+ * Note: when you implement an input stream, the ref_count must be initialized so that the resource is cleaned up
+ * when it reaches zero.
+ */
struct aws_input_stream {
- struct aws_allocator *allocator;
+    /* points to the impl; only set if needed. */
void *impl;
- struct aws_input_stream_vtable *vtable;
+ const struct aws_input_stream_vtable *vtable;
+ struct aws_ref_count ref_count;
};
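+
+/* Illustrative implementation sketch; `my_stream` and the s_my_* names are
+ * hypothetical. Initialize ref_count so the stream is destroyed when the last
+ * reference is released:
+ *
+ *     static void s_my_stream_destroy(void *user_data) {
+ *         struct my_stream *impl = user_data;
+ *         aws_mem_release(impl->allocator, impl);
+ *     }
+ *
+ *     impl->base.vtable = &s_my_stream_vtable;
+ *     impl->base.impl = impl;
+ *     aws_ref_count_init(&impl->base.ref_count, impl, s_my_stream_destroy);
+ */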
AWS_EXTERN_C_BEGIN
+/**
+ * Increments the reference count on the input stream, allowing the caller to take a reference to it.
+ *
+ * Returns the same input stream passed in.
+ */
+AWS_IO_API struct aws_input_stream *aws_input_stream_acquire(struct aws_input_stream *stream);
+
+/**
+ * Decrements an input stream's ref count. When the ref count drops to zero, the input stream will be destroyed.
+ *
+ * Always returns NULL.
+ */
+AWS_IO_API struct aws_input_stream *aws_input_stream_release(struct aws_input_stream *stream);
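+
+/* Illustrative ownership sketch:
+ *
+ *     struct aws_input_stream *my_ref = aws_input_stream_acquire(stream); // take a reference
+ *     ... use the stream ...
+ *     my_ref = aws_input_stream_release(my_ref); // always returns NULL
+ */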
+
/*
 * Seek to a position within a stream; analogous to fseek() and its relatives
*/
-AWS_IO_API int aws_input_stream_seek(
- struct aws_input_stream *stream,
- aws_off_t offset,
- enum aws_stream_seek_basis basis);
+AWS_IO_API int aws_input_stream_seek(struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis);
/*
* Read data from a stream. If data is available, will read up to the (capacity - len) open bytes
@@ -75,8 +102,8 @@ AWS_IO_API int aws_input_stream_get_status(struct aws_input_stream *stream, stru
*/
AWS_IO_API int aws_input_stream_get_length(struct aws_input_stream *stream, int64_t *out_length);
-/*
- * Tears down the stream
+/* DEPRECATED
+ * Tears down the stream. Equivalent to aws_input_stream_release()
*/
AWS_IO_API void aws_input_stream_destroy(struct aws_input_stream *stream);
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/tls_channel_handler.h b/contrib/restricted/aws/aws-c-io/include/aws/io/tls_channel_handler.h
index 25f9b666b4..9e1c9d436c 100644
--- a/contrib/restricted/aws/aws-c-io/include/aws/io/tls_channel_handler.h
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/tls_channel_handler.h
@@ -10,6 +10,7 @@
struct aws_channel_slot;
struct aws_channel_handler;
+struct aws_pkcs11_session;
struct aws_string;
enum aws_tls_versions {
@@ -28,6 +29,7 @@ enum aws_tls_cipher_pref {
AWS_IO_TLS_CIPHER_PREF_KMS_PQ_TLSv1_0_2020_02 = 3,
AWS_IO_TLS_CIPHER_PREF_KMS_PQ_SIKE_TLSv1_0_2020_02 = 4,
AWS_IO_TLS_CIPHER_PREF_KMS_PQ_TLSv1_0_2020_07 = 5,
+ AWS_IO_TLS_CIPHER_PREF_PQ_TLSv1_0_2021_05 = 6,
AWS_IO_TLS_CIPHER_PREF_END_RANGE = 0xFFFF
};
@@ -159,6 +161,15 @@ struct aws_tls_ctx_options {
* Password for the pkcs12 data in pkcs12.
*/
struct aws_byte_buf pkcs12_password;
+
+# if !defined(AWS_OS_IOS)
+ /**
+ * On Apple OS you can also use a custom keychain instead of
+ * the default keychain of the account.
+ */
+ struct aws_string *keychain_path;
+# endif
+
#endif
/** max tls fragment size. Default is the value of g_aws_channel_max_fragment_size. */
@@ -174,6 +185,25 @@ struct aws_tls_ctx_options {
* If you set this in server mode, it enforces client authentication.
*/
bool verify_peer;
+
+ /**
+ * For use when adding BYO_CRYPTO implementations. You can set extra data in here for use with your TLS
+ * implementation.
+ */
+ void *ctx_options_extension;
+
+ /**
+ * Set if using PKCS#11 for private key operations.
+ * See aws_tls_ctx_pkcs11_options for more details.
+ */
+ struct {
+ struct aws_pkcs11_lib *lib; /* required */
+ struct aws_string *user_pin; /* NULL if token uses "protected authentication path" */
+ struct aws_string *token_label; /* optional */
+ struct aws_string *private_key_object_label; /* optional */
+ uint64_t slot_id; /* optional */
+ bool has_slot_id;
+ } pkcs11;
};
struct aws_tls_negotiated_protocol_message {
@@ -195,6 +225,35 @@ enum aws_tls_negotiation_status {
AWS_TLS_NEGOTIATION_STATUS_FAILURE
};
+#ifdef BYO_CRYPTO
+/**
+ * Callback for creating a TLS handler. If you're using this you're using BYO_CRYPTO. This function should return
+ * a fully implemented aws_channel_handler instance for TLS. Note: the aws_tls_options passed to your
+ * aws_tls_handler_new_fn contains multiple callbacks. Namely: aws_tls_on_negotiation_result_fn. You are responsible for
+ * invoking this function when TLs session negotiation has completed.
+ */
+typedef struct aws_channel_handler *(aws_tls_handler_new_fn)(
+ struct aws_allocator *allocator,
+ struct aws_tls_connection_options *options,
+ struct aws_channel_slot *slot,
+ void *user_data);
+
+/**
+ * Invoked when it's time to start TLS negotiation. Note: the aws_tls_options passed to your aws_tls_handler_new_fn
+ * contains multiple callbacks. Namely: aws_tls_on_negotiation_result_fn. You are responsible for invoking this function
+ * when TLS session negotiation has completed.
+ */
+typedef int(aws_tls_client_handler_start_negotiation_fn)(struct aws_channel_handler *handler, void *user_data);
+
+struct aws_tls_byo_crypto_setup_options {
+ aws_tls_handler_new_fn *new_handler_fn;
+ /* ignored for server implementations, required for clients. */
+ aws_tls_client_handler_start_negotiation_fn *start_negotiation_fn;
+ void *user_data;
+};
+
+#endif /* BYO_CRYPTO */
+
AWS_EXTERN_C_BEGIN
/******************************** tls options init stuff ***********************/
@@ -209,13 +268,13 @@ AWS_IO_API void aws_tls_ctx_options_init_default_client(
*/
AWS_IO_API void aws_tls_ctx_options_clean_up(struct aws_tls_ctx_options *options);
-#if !defined(AWS_OS_IOS)
-
/**
* Initializes options for use with mutual tls in client mode.
* cert_path and pkey_path are paths to files on disk. cert_path
* and pkey_path are treated as PKCS#7 PEM armored. They are loaded
* from disk and stored in buffers internally.
+ *
+ * NOTE: This is unsupported on iOS.
*/
AWS_IO_API int aws_tls_ctx_options_init_client_mtls_from_path(
struct aws_tls_ctx_options *options,
@@ -227,6 +286,8 @@ AWS_IO_API int aws_tls_ctx_options_init_client_mtls_from_path(
* Initializes options for use with mutual tls in client mode.
* cert and pkey are copied. cert and pkey are treated as PKCS#7 PEM
* armored.
+ *
+ * NOTE: This is unsupported on iOS.
*/
AWS_IO_API int aws_tls_ctx_options_init_client_mtls(
struct aws_tls_ctx_options *options,
@@ -235,6 +296,100 @@ AWS_IO_API int aws_tls_ctx_options_init_client_mtls(
const struct aws_byte_cursor *pkey);
/**
+ * This struct exists as a graceful way to pass many arguments when
+ * calling init-with-pkcs11 functions on aws_tls_ctx_options (this also makes
+ * it easy to introduce optional arguments in the future).
+ * Instances of this struct should only exist briefly on the stack.
+ *
+ * Instructions for binding this to high-level languages:
+ * - Python: The members of this struct should be the keyword args to the init-with-pkcs11 functions.
+ * - JavaScript: This should be an options map passed to init-with-pkcs11 functions.
+ * - Java: This should be an options class passed to init-with-pkcs11 functions.
+ * - C++: Same as Java
+ *
+ * Notes on integer types:
+ * PKCS#11 uses `unsigned long` for IDs, handles, etc., but we expose them as `uint64_t` in public APIs.
+ * We do this because sizeof(long) is inconsistent across platform/arch/language
+ * (ex: always 64bit in Java, always 32bit in C on Windows, matches CPU in C on Linux and Apple).
+ * By using uint64_t in our public API, we can keep the careful bounds-checking all in one
+ * place, instead of expecting each high-level language binding to get it just right.
+ */
+struct aws_tls_ctx_pkcs11_options {
+ /**
+ * The PKCS#11 library to use.
+ * This field is required.
+ */
+ struct aws_pkcs11_lib *pkcs11_lib;
+
+ /**
+ * User PIN, for logging into the PKCS#11 token (UTF-8).
+ * Zero out to log into a token with a "protected authentication path".
+ */
+ struct aws_byte_cursor user_pin;
+
+ /**
+ * ID of slot containing PKCS#11 token.
+ * If set to NULL, the token will be chosen based on other criteria
+ * (such as token label).
+ */
+ const uint64_t *slot_id;
+
+ /**
+ * Label of PKCS#11 token to use.
+ * If zeroed out, the token will be chosen based on other criteria
+ * (such as slot ID).
+ */
+ struct aws_byte_cursor token_label;
+
+ /**
+ * Label of private key object on PKCS#11 token (UTF-8).
+ * If zeroed out, the private key will be chosen based on other criteria
+ * (such as being the only available private key on the token).
+ */
+ struct aws_byte_cursor private_key_object_label;
+
+ /**
+ * Certificate's file path on disk (UTF-8).
+ * The certificate must be PEM formatted and UTF-8 encoded.
+ * Zero out if passing in certificate by some other means (such as file contents).
+ */
+ struct aws_byte_cursor cert_file_path;
+
+ /**
+ * Certificate's file contents (UTF-8).
+ * The certificate must be PEM formatted and UTF-8 encoded.
+ * Zero out if passing in certificate by some other means (such as file path).
+ */
+ struct aws_byte_cursor cert_file_contents;
+};
+
+/**
+ * Initializes options for use with mutual TLS in client mode,
+ * where a PKCS#11 library provides access to the private key.
+ *
+ * NOTE: This only works on Unix devices.
+ *
+ * @param options aws_tls_ctx_options to be initialized.
+ * @param allocator Allocator to use.
+ * @param pkcs11_options Options for using PKCS#11 (contents are copied)
+ */
+AWS_IO_API int aws_tls_ctx_options_init_client_mtls_with_pkcs11(
+ struct aws_tls_ctx_options *options,
+ struct aws_allocator *allocator,
+ const struct aws_tls_ctx_pkcs11_options *pkcs11_options);
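+
+/* Illustrative sketch; the pin, labels, and paths are placeholders, and
+ * `pkcs11_lib` is a handle returned by aws_pkcs11_lib_new():
+ *
+ *     struct aws_tls_ctx_pkcs11_options pkcs11_options = {
+ *         .pkcs11_lib = pkcs11_lib,
+ *         .user_pin = aws_byte_cursor_from_c_str("1234"),
+ *         .token_label = aws_byte_cursor_from_c_str("my-token"),
+ *         .private_key_object_label = aws_byte_cursor_from_c_str("my-key"),
+ *         .cert_file_path = aws_byte_cursor_from_c_str("/path/to/cert.pem"),
+ *     };
+ *     struct aws_tls_ctx_options tls_options;
+ *     if (aws_tls_ctx_options_init_client_mtls_with_pkcs11(&tls_options, allocator, &pkcs11_options)) {
+ *         return aws_last_error();
+ *     }
+ */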
+
+/**
+ * @Deprecated
+ *
+ * Sets a custom keychain path for storing the cert and pkey with mutual tls in client mode.
+ *
+ * NOTE: This only works on MacOS.
+ */
+AWS_IO_API int aws_tls_ctx_options_set_keychain_path(
+ struct aws_tls_ctx_options *options,
+ struct aws_byte_cursor *keychain_path_cursor);
+
+/**
+ * Initializes options for use in server mode.
* cert_path and pkey_path are paths to files on disk. cert_path
* and pkey_path are treated as PKCS#7 PEM armored. They are loaded
@@ -257,37 +412,38 @@ AWS_IO_API int aws_tls_ctx_options_init_default_server(
struct aws_byte_cursor *cert,
struct aws_byte_cursor *pkey);
-#endif /* AWS_OS_IOS */
-
-#ifdef _WIN32
/**
- * Initializes options for use with mutual tls in client mode. This function is only available on
- * windows. cert_reg_path is the path to a system
+ * Initializes options for use with mutual tls in client mode.
+ * cert_reg_path is the path to a system
 * installed certificate/private key pair. Example:
 * CurrentUser\\MY\\<thumbprint>
+ *
+ * NOTE: This only works on Windows.
*/
-AWS_IO_API void aws_tls_ctx_options_init_client_mtls_from_system_path(
+AWS_IO_API int aws_tls_ctx_options_init_client_mtls_from_system_path(
struct aws_tls_ctx_options *options,
struct aws_allocator *allocator,
const char *cert_reg_path);
/**
- * Initializes options for use with server mode. This function is only available on
- * windows. cert_reg_path is the path to a system
+ * Initializes options for use with server mode.
+ * cert_reg_path is the path to a system
 * installed certificate/private key pair. Example:
 * CurrentUser\\MY\\<thumbprint>
+ *
+ * NOTE: This only works on Windows.
*/
-AWS_IO_API void aws_tls_ctx_options_init_default_server_from_system_path(
+AWS_IO_API int aws_tls_ctx_options_init_default_server_from_system_path(
struct aws_tls_ctx_options *options,
struct aws_allocator *allocator,
const char *cert_reg_path);
-#endif /* _WIN32 */
-#ifdef __APPLE__
/**
- * Initializes options for use with mutual tls in client mode. This function is only available on
- * apple machines. pkcs12_path is a path to a file on disk containing a pkcs#12 file. The file is loaded
+ * Initializes options for use with mutual tls in client mode.
+ * pkcs12_path is a path to a file on disk containing a pkcs#12 file. The file is loaded
* into an internal buffer. pkcs_pwd is the corresponding password for the pkcs#12 file; it is copied.
+ *
+ * NOTE: This only works on Apple devices.
*/
AWS_IO_API int aws_tls_ctx_options_init_client_mtls_pkcs12_from_path(
struct aws_tls_ctx_options *options,
@@ -296,9 +452,11 @@ AWS_IO_API int aws_tls_ctx_options_init_client_mtls_pkcs12_from_path(
struct aws_byte_cursor *pkcs_pwd);
/**
- * Initializes options for use with mutual tls in client mode. This function is only available on
- * apple machines. pkcs12 is a buffer containing a pkcs#12 certificate and private key; it is copied.
+ * Initializes options for use with mutual tls in client mode.
+ * pkcs12 is a buffer containing a pkcs#12 certificate and private key; it is copied.
* pkcs_pwd is the corresponding password for the pkcs#12 buffer; it is copied.
+ *
+ * NOTE: This only works on Apple devices.
*/
AWS_IO_API int aws_tls_ctx_options_init_client_mtls_pkcs12(
struct aws_tls_ctx_options *options,
@@ -307,9 +465,11 @@ AWS_IO_API int aws_tls_ctx_options_init_client_mtls_pkcs12(
struct aws_byte_cursor *pkcs_pwd);
/**
- * Initializes options for use in server mode. This function is only available on
- * apple machines. pkcs12_path is a path to a file on disk containing a pkcs#12 file. The file is loaded
+ * Initializes options for use in server mode.
+ * pkcs12_path is a path to a file on disk containing a pkcs#12 file. The file is loaded
* into an internal buffer. pkcs_pwd is the corresponding password for the pkcs#12 file; it is copied.
+ *
+ * NOTE: This only works on Apple devices.
*/
AWS_IO_API int aws_tls_ctx_options_init_server_pkcs12_from_path(
struct aws_tls_ctx_options *options,
@@ -318,16 +478,17 @@ AWS_IO_API int aws_tls_ctx_options_init_server_pkcs12_from_path(
struct aws_byte_cursor *pkcs_password);
/**
- * Initializes options for use in server mode. This function is only available on
- * apple machines. pkcs12 is a buffer containing a pkcs#12 certificate and private key; it is copied.
+ * Initializes options for use in server mode.
+ * pkcs12 is a buffer containing a pkcs#12 certificate and private key; it is copied.
* pkcs_pwd is the corresponding password for the pkcs#12 buffer; it is copied.
+ *
+ * NOTE: This only works on Apple devices.
*/
AWS_IO_API int aws_tls_ctx_options_init_server_pkcs12(
struct aws_tls_ctx_options *options,
struct aws_allocator *allocator,
struct aws_byte_cursor *pkcs12,
struct aws_byte_cursor *pkcs_password);
-#endif /* __APPLE__ */
/**
* Sets alpn list in the form <protocol1;protocol2;...>. A maximum of 4 protocols are supported.
@@ -367,6 +528,12 @@ AWS_IO_API int aws_tls_ctx_options_override_default_trust_store_from_path(
const char *ca_file);
/**
+ * When implementing BYO_CRYPTO, if you need extra data to pass to your tls implementation, set it here. The
+ * extension_data must outlive the options object and be cleaned up after the options are cleaned up.
+ */
+AWS_IO_API void aws_tls_ctx_options_set_extension_data(struct aws_tls_ctx_options *options, void *extension_data);
+
+/**
 * Initializes default connection options from an instance of aws_tls_ctx.
*/
AWS_IO_API void aws_tls_connection_options_init_from_ctx(
@@ -450,6 +617,18 @@ AWS_IO_API struct aws_channel_handler *aws_tls_server_handler_new(
struct aws_tls_connection_options *options,
struct aws_channel_slot *slot);
+#ifdef BYO_CRYPTO
+/**
+ * If using BYO_CRYPTO, you need to call this function prior to creating any client channels in the application.
+ */
+AWS_IO_API void aws_tls_byo_crypto_set_client_setup_options(const struct aws_tls_byo_crypto_setup_options *options);
+/**
+ * If using BYO_CRYPTO, you need to call this function prior to creating any server channels in the application.
+ */
+AWS_IO_API void aws_tls_byo_crypto_set_server_setup_options(const struct aws_tls_byo_crypto_setup_options *options);
+
+#endif /* BYO_CRYPTO */
+
/**
* Creates a channel handler, for client or server mode, that handles alpn. This isn't necessarily required
* since you can always call aws_tls_handler_protocol in the aws_tls_on_negotiation_result_fn callback, but
@@ -466,6 +645,7 @@ AWS_IO_API struct aws_channel_handler *aws_tls_alpn_handler_new(
*/
AWS_IO_API int aws_tls_client_handler_start_negotiation(struct aws_channel_handler *handler);
+#ifndef BYO_CRYPTO
/**
* Creates a new server ctx. This ctx can be used for the lifetime of the application assuming you want the same
* options for every incoming connection. Options will be copied.
@@ -481,6 +661,7 @@ AWS_IO_API struct aws_tls_ctx *aws_tls_server_ctx_new(
AWS_IO_API struct aws_tls_ctx *aws_tls_client_ctx_new(
struct aws_allocator *alloc,
const struct aws_tls_ctx_options *options);
+#endif /* BYO_CRYPTO */
/**
* Increments the reference count on the tls context, allowing the caller to take a reference to it.
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/uri.h b/contrib/restricted/aws/aws-c-io/include/aws/io/uri.h
index 06206204ae..d57a5f6691 100644
--- a/contrib/restricted/aws/aws-c-io/include/aws/io/uri.h
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/uri.h
@@ -18,6 +18,9 @@ struct aws_uri {
struct aws_byte_buf uri_str;
struct aws_byte_cursor scheme;
struct aws_byte_cursor authority;
+ struct aws_byte_cursor userinfo;
+ struct aws_byte_cursor user;
+ struct aws_byte_cursor password;
struct aws_byte_cursor host_name;
uint16_t port;
struct aws_byte_cursor path;
diff --git a/contrib/restricted/aws/aws-c-io/source/bsd/kqueue_event_loop.c b/contrib/restricted/aws/aws-c-io/source/bsd/kqueue_event_loop.c
index 3de882d045..4caaf9c674 100644
--- a/contrib/restricted/aws/aws-c-io/source/bsd/kqueue_event_loop.c
+++ b/contrib/restricted/aws/aws-c-io/source/bsd/kqueue_event_loop.c
@@ -91,6 +91,8 @@ struct kqueue_loop {
* on them later */
enum event_thread_state state;
} thread_data;
+
+ struct aws_thread_options thread_options;
};
/* Data attached to aws_io_handle while the handle is subscribed to io events */
@@ -128,9 +130,13 @@ struct aws_event_loop_vtable s_kqueue_vtable = {
.is_on_callers_thread = s_is_event_thread,
};
-struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, aws_io_clock_fn *clock) {
+struct aws_event_loop *aws_event_loop_new_default_with_options(
+ struct aws_allocator *alloc,
+ const struct aws_event_loop_options *options) {
AWS_ASSERT(alloc);
-    AWS_ASSERT(clock);
+ AWS_ASSERT(options);
+ AWS_ASSERT(options->clock);
bool clean_up_event_loop_mem = false;
bool clean_up_event_loop_base = false;
@@ -149,7 +155,7 @@ struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, a
AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Initializing edge-triggered kqueue", (void *)event_loop);
clean_up_event_loop_mem = true;
- int err = aws_event_loop_init_base(event_loop, alloc, clock);
+ int err = aws_event_loop_init_base(event_loop, alloc, options->clock);
if (err) {
goto clean_up;
}
@@ -159,6 +165,13 @@ struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, a
if (!impl) {
goto clean_up;
}
+
+ if (options->thread_options) {
+ impl->thread_options = *options->thread_options;
+ } else {
+ impl->thread_options = *aws_default_thread_options();
+ }
+
    /* initialize thread id to NULL. It will be set when the event loop thread starts. */
aws_atomic_init_ptr(&impl->running_thread_id, NULL);
clean_up_impl_mem = true;
@@ -353,8 +366,12 @@ static int s_run(struct aws_event_loop *event_loop) {
* and it's ok to touch cross_thread_data without locking the mutex */
impl->cross_thread_data.state = EVENT_THREAD_STATE_RUNNING;
- int err = aws_thread_launch(&impl->thread_created_on, s_event_thread_main, (void *)event_loop, NULL);
+ aws_thread_increment_unjoined_count();
+ int err =
+ aws_thread_launch(&impl->thread_created_on, s_event_thread_main, (void *)event_loop, &impl->thread_options);
+
if (err) {
+ aws_thread_decrement_unjoined_count();
AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: thread creation failed.", (void *)event_loop);
goto clean_up;
}
@@ -414,6 +431,7 @@ static int s_wait_for_stop_completion(struct aws_event_loop *event_loop) {
#endif
int err = aws_thread_join(&impl->thread_created_on);
+ aws_thread_decrement_unjoined_count();
if (err) {
return AWS_OP_ERR;
}
@@ -836,6 +854,7 @@ static void s_event_thread_main(void *user_data) {
int num_kevents = kevent(
impl->kq_fd, NULL /*changelist*/, 0 /*nchanges*/, kevents /*eventlist*/, MAX_EVENTS /*nevents*/, &timeout);
+ aws_event_loop_register_tick_start(event_loop);
AWS_LOGF_TRACE(
AWS_LS_IO_EVENT_LOOP, "id=%p: wake up with %d events to process.", (void *)event_loop, num_kevents);
if (num_kevents == -1) {
@@ -952,6 +971,8 @@ static void s_event_thread_main(void *user_data) {
timeout.tv_sec = (time_t)(timeout_sec);
timeout.tv_nsec = (long)(timeout_remainder_ns);
}
+
+ aws_event_loop_register_tick_end(event_loop);
}
AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: exiting main loop", (void *)event_loop);
diff --git a/contrib/restricted/aws/aws-c-io/source/channel.c b/contrib/restricted/aws/aws-c-io/source/channel.c
index a36fa269d7..5eb5c1e7a2 100644
--- a/contrib/restricted/aws/aws-c-io/source/channel.c
+++ b/contrib/restricted/aws/aws-c-io/source/channel.c
@@ -1142,3 +1142,29 @@ int aws_channel_set_statistics_handler(struct aws_channel *channel, struct aws_c
struct aws_event_loop *aws_channel_get_event_loop(struct aws_channel *channel) {
return channel->loop;
}
+
+int aws_channel_trigger_read(struct aws_channel *channel) {
+ if (channel == NULL) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (!aws_channel_thread_is_callers_thread(channel)) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ struct aws_channel_slot *slot = channel->first;
+ if (slot == NULL) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ struct aws_channel_handler *handler = slot->handler;
+ if (handler == NULL) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ if (handler->vtable->trigger_read != NULL) {
+ handler->vtable->trigger_read(handler);
+ }
+
+ return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-io/source/channel_bootstrap.c b/contrib/restricted/aws/aws-c-io/source/channel_bootstrap.c
index f5e0ad7aff..b3a638aaff 100644
--- a/contrib/restricted/aws/aws-c-io/source/channel_bootstrap.c
+++ b/contrib/restricted/aws/aws-c-io/source/channel_bootstrap.c
@@ -131,6 +131,7 @@ struct client_connection_args {
bool connection_chosen;
bool setup_called;
bool enable_read_back_pressure;
+ struct aws_event_loop *requested_event_loop;
/*
* It is likely that all reference adjustments to the connection args take place in a single event loop
@@ -171,6 +172,18 @@ static void s_client_connection_args_release(struct client_connection_args *args
}
}
+static struct aws_event_loop *s_get_connection_event_loop(struct client_connection_args *args) {
+ if (args == NULL) {
+ return NULL;
+ }
+
+ if (args->requested_event_loop != NULL) {
+ return args->requested_event_loop;
+ }
+
+ return aws_event_loop_group_get_next_loop(args->bootstrap->event_loop_group);
+}
+
static void s_connection_args_setup_callback(
struct client_connection_args *args,
int error_code,
@@ -632,8 +645,7 @@ static void s_on_host_resolved(
(void *)client_connection_args->bootstrap,
(unsigned long long)host_addresses_len);
/* use this event loop for all outgoing connection attempts (only one will ultimately win). */
- struct aws_event_loop *connect_loop =
- aws_event_loop_group_get_next_loop(client_connection_args->bootstrap->event_loop_group);
+ struct aws_event_loop *connect_loop = s_get_connection_event_loop(client_connection_args);
client_connection_args->addresses_count = (uint8_t)host_addresses_len;
/* allocate all the task data first, in case it fails... */
@@ -692,6 +704,24 @@ static void s_on_host_resolved(
}
}
+static bool s_does_event_loop_belong_to_event_loop_group(
+ struct aws_event_loop *loop,
+ struct aws_event_loop_group *elg) {
+ if (loop == NULL || elg == NULL) {
+ return false;
+ }
+
+ size_t loop_count = aws_event_loop_group_get_loop_count(elg);
+ for (size_t i = 0; i < loop_count; ++i) {
+ struct aws_event_loop *elg_loop = aws_event_loop_group_get_loop_at(elg, i);
+ if (elg_loop == loop) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
int aws_client_bootstrap_new_socket_channel(struct aws_socket_channel_bootstrap_options *options) {
struct aws_client_bootstrap *bootstrap = options->bootstrap;
@@ -707,6 +737,14 @@ int aws_client_bootstrap_new_socket_channel(struct aws_socket_channel_bootstrap_
AWS_FATAL_ASSERT(tls_options == NULL || socket_options->type == AWS_SOCKET_STREAM);
aws_io_fatal_assert_library_initialized();
+ if (options->requested_event_loop != NULL) {
+ /* If we're asking for a specific event loop, verify it belongs to the bootstrap's event loop group */
+ if (!(s_does_event_loop_belong_to_event_loop_group(
+ options->requested_event_loop, bootstrap->event_loop_group))) {
+ return aws_raise_error(AWS_ERROR_IO_PINNED_EVENT_LOOP_MISMATCH);
+ }
+ }
+
struct client_connection_args *client_connection_args =
aws_mem_calloc(bootstrap->allocator, 1, sizeof(struct client_connection_args));
@@ -736,6 +774,7 @@ int aws_client_bootstrap_new_socket_channel(struct aws_socket_channel_bootstrap_
client_connection_args->outgoing_options = *socket_options;
client_connection_args->outgoing_port = port;
client_connection_args->enable_read_back_pressure = options->enable_read_back_pressure;
+ client_connection_args->requested_event_loop = options->requested_event_loop;
if (tls_options) {
if (aws_tls_connection_options_copy(&client_connection_args->channel_data.tls_options, tls_options)) {
@@ -815,7 +854,7 @@ int aws_client_bootstrap_new_socket_channel(struct aws_socket_channel_bootstrap_
client_connection_args->addresses_count = 1;
- struct aws_event_loop *connect_loop = aws_event_loop_group_get_next_loop(bootstrap->event_loop_group);
+ struct aws_event_loop *connect_loop = s_get_connection_event_loop(client_connection_args);
s_client_connection_args_acquire(client_connection_args);
if (aws_socket_connect(
@@ -1085,6 +1124,16 @@ static inline int s_setup_server_tls(struct server_channel_data *channel_data, s
}
}
+ /*
+ * Server-side channels can reach this point in execution and actually have the CLIENT_HELLO payload already
+ * on the socket in a signalled state, but there was no socket handler or read handler at the time of signal.
+ * So we need to manually trigger a read here to cover that case, otherwise the negotiation will time out because
+ * we will not receive any more data/notifications (unless we read and react).
+ */
+ if (aws_channel_trigger_read(channel)) {
+ return AWS_OP_ERR;
+ }
+
return AWS_OP_SUCCESS;
}
diff --git a/contrib/restricted/aws/aws-c-io/source/event_loop.c b/contrib/restricted/aws/aws-c-io/source/event_loop.c
index 0b4a41b374..5eb8a084ac 100644
--- a/contrib/restricted/aws/aws-c-io/source/event_loop.c
+++ b/contrib/restricted/aws/aws-c-io/source/event_loop.c
@@ -6,9 +6,19 @@
#include <aws/io/event_loop.h>
#include <aws/common/clock.h>
+#include <aws/common/device_random.h>
#include <aws/common/system_info.h>
#include <aws/common/thread.h>
+struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, aws_io_clock_fn *clock) {
+ struct aws_event_loop_options options = {
+ .thread_options = NULL,
+ .clock = clock,
+ };
+
+ return aws_event_loop_new_default_with_options(alloc, &options);
+}
+
static void s_event_loop_group_thread_exit(void *user_data) {
struct aws_event_loop_group *el_group = user_data;
@@ -20,8 +30,6 @@ static void s_event_loop_group_thread_exit(void *user_data) {
if (completion_callback != NULL) {
completion_callback(completion_user_data);
}
-
- aws_global_thread_creator_decrement();
}
static void s_aws_event_loop_group_shutdown_sync(struct aws_event_loop_group *el_group) {
@@ -59,24 +67,45 @@ static void s_aws_event_loop_group_shutdown_async(struct aws_event_loop_group *e
struct aws_thread_options thread_options;
AWS_ZERO_STRUCT(thread_options);
+ thread_options.cpu_id = -1;
+ thread_options.join_strategy = AWS_TJS_MANAGED;
AWS_FATAL_ASSERT(
aws_thread_launch(&cleanup_thread, s_event_loop_destroy_async_thread_fn, el_group, &thread_options) ==
AWS_OP_SUCCESS);
-
- aws_thread_clean_up(&cleanup_thread);
}
-struct aws_event_loop_group *aws_event_loop_group_new(
+static struct aws_event_loop_group *s_event_loop_group_new(
struct aws_allocator *alloc,
aws_io_clock_fn *clock,
uint16_t el_count,
+ uint16_t cpu_group,
+ bool pin_threads,
aws_new_event_loop_fn *new_loop_fn,
void *new_loop_user_data,
const struct aws_shutdown_callback_options *shutdown_options) {
-
AWS_ASSERT(new_loop_fn);
+ size_t group_cpu_count = 0;
+ struct aws_cpu_info *usable_cpus = NULL;
+
+ if (pin_threads) {
+ group_cpu_count = aws_get_cpu_count_for_group(cpu_group);
+
+ if (!group_cpu_count) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ usable_cpus = aws_mem_calloc(alloc, group_cpu_count, sizeof(struct aws_cpu_info));
+
+ if (usable_cpus == NULL) {
+ return NULL;
+ }
+
+ aws_get_cpu_ids_for_group(cpu_group, usable_cpus, group_cpu_count);
+ }
+
struct aws_event_loop_group *el_group = aws_mem_calloc(alloc, 1, sizeof(struct aws_event_loop_group));
if (el_group == NULL) {
return NULL;
@@ -85,26 +114,39 @@ struct aws_event_loop_group *aws_event_loop_group_new(
el_group->allocator = alloc;
aws_ref_count_init(
&el_group->ref_count, el_group, (aws_simple_completion_callback *)s_aws_event_loop_group_shutdown_async);
- aws_atomic_init_int(&el_group->current_index, 0);
if (aws_array_list_init_dynamic(&el_group->event_loops, alloc, el_count, sizeof(struct aws_event_loop *))) {
goto on_error;
}
for (uint16_t i = 0; i < el_count; ++i) {
- struct aws_event_loop *loop = new_loop_fn(alloc, clock, new_loop_user_data);
+ /* Don't pin to hyper-threads if a user cared enough to specify a NUMA node */
+ if (!pin_threads || (i < group_cpu_count && !usable_cpus[i].suspected_hyper_thread)) {
+ struct aws_thread_options thread_options = *aws_default_thread_options();
- if (!loop) {
- goto on_error;
- }
+ struct aws_event_loop_options options = {
+ .clock = clock,
+ };
- if (aws_array_list_push_back(&el_group->event_loops, (const void *)&loop)) {
- aws_event_loop_destroy(loop);
- goto on_error;
- }
+ if (pin_threads) {
+ thread_options.cpu_id = usable_cpus[i].cpu_id;
+ options.thread_options = &thread_options;
+ }
+
+ struct aws_event_loop *loop = new_loop_fn(alloc, &options, new_loop_user_data);
+
+ if (!loop) {
+ goto on_error;
+ }
- if (aws_event_loop_run(loop)) {
- goto on_error;
+ if (aws_array_list_push_back(&el_group->event_loops, (const void *)&loop)) {
+ aws_event_loop_destroy(loop);
+ goto on_error;
+ }
+
+ if (aws_event_loop_run(loop)) {
+ goto on_error;
+ }
}
}
@@ -112,25 +154,42 @@ struct aws_event_loop_group *aws_event_loop_group_new(
el_group->shutdown_options = *shutdown_options;
}
- aws_global_thread_creator_increment();
+ if (pin_threads) {
+ aws_mem_release(alloc, usable_cpus);
+ }
return el_group;
on_error:
+ aws_mem_release(alloc, usable_cpus);
s_aws_event_loop_group_shutdown_sync(el_group);
s_event_loop_group_thread_exit(el_group);
return NULL;
}
-static struct aws_event_loop *default_new_event_loop(
- struct aws_allocator *allocator,
+struct aws_event_loop_group *aws_event_loop_group_new(
+ struct aws_allocator *alloc,
aws_io_clock_fn *clock,
+ uint16_t el_count,
+ aws_new_event_loop_fn *new_loop_fn,
+ void *new_loop_user_data,
+ const struct aws_shutdown_callback_options *shutdown_options) {
+
+ AWS_ASSERT(new_loop_fn);
+ AWS_ASSERT(el_count);
+
+ return s_event_loop_group_new(alloc, clock, el_count, 0, false, new_loop_fn, new_loop_user_data, shutdown_options);
+}
+
+static struct aws_event_loop *s_default_new_event_loop(
+ struct aws_allocator *allocator,
+ const struct aws_event_loop_options *options,
void *user_data) {
(void)user_data;
- return aws_event_loop_new_default(allocator, clock);
+ return aws_event_loop_new_default_with_options(allocator, options);
}
struct aws_event_loop_group *aws_event_loop_group_new_default(
@@ -138,11 +197,44 @@ struct aws_event_loop_group *aws_event_loop_group_new_default(
uint16_t max_threads,
const struct aws_shutdown_callback_options *shutdown_options) {
if (!max_threads) {
- max_threads = (uint16_t)aws_system_info_processor_count();
+ uint16_t processor_count = (uint16_t)aws_system_info_processor_count();
+ /* cut them in half to avoid using hyper threads for the IO work. */
+ max_threads = processor_count > 1 ? processor_count / 2 : processor_count;
}
return aws_event_loop_group_new(
- alloc, aws_high_res_clock_get_ticks, max_threads, default_new_event_loop, NULL, shutdown_options);
+ alloc, aws_high_res_clock_get_ticks, max_threads, s_default_new_event_loop, NULL, shutdown_options);
+}
+
+struct aws_event_loop_group *aws_event_loop_group_new_pinned_to_cpu_group(
+ struct aws_allocator *alloc,
+ aws_io_clock_fn *clock,
+ uint16_t el_count,
+ uint16_t cpu_group,
+ aws_new_event_loop_fn *new_loop_fn,
+ void *new_loop_user_data,
+ const struct aws_shutdown_callback_options *shutdown_options) {
+ AWS_ASSERT(new_loop_fn);
+ AWS_ASSERT(el_count);
+
+ return s_event_loop_group_new(
+ alloc, clock, el_count, cpu_group, true, new_loop_fn, new_loop_user_data, shutdown_options);
+}
+
+struct aws_event_loop_group *aws_event_loop_group_new_default_pinned_to_cpu_group(
+ struct aws_allocator *alloc,
+ uint16_t max_threads,
+ uint16_t cpu_group,
+ const struct aws_shutdown_callback_options *shutdown_options) {
+
+ if (!max_threads) {
+ uint16_t processor_count = (uint16_t)aws_system_info_processor_count();
+ /* cut them in half to avoid using hyper threads for the IO work. */
+ max_threads = processor_count > 1 ? processor_count / 2 : processor_count;
+ }
+
+ return aws_event_loop_group_new_pinned_to_cpu_group(
+ alloc, aws_high_res_clock_get_ticks, max_threads, cpu_group, s_default_new_event_loop, NULL, shutdown_options);
}
struct aws_event_loop_group *aws_event_loop_group_acquire(struct aws_event_loop_group *el_group) {
@@ -176,19 +268,33 @@ struct aws_event_loop *aws_event_loop_group_get_next_loop(struct aws_event_loop_
return NULL;
}
- /* thread safety: atomic CAS to ensure we got the best loop, and that the index is within bounds */
- size_t old_index = 0;
- size_t new_index = 0;
- do {
- old_index = aws_atomic_load_int(&el_group->current_index);
- new_index = (old_index + 1) % loop_count;
- } while (!aws_atomic_compare_exchange_int(&el_group->current_index, &old_index, new_index));
+ /* do one call to get 32 random bits because this hits an actual entropy source and it's not cheap */
+ uint32_t random_32_bit_num = 0;
+ aws_device_random_u32(&random_32_bit_num);
+
+ /* use the best of two algorithm to select the loop with the lowest load.
+ * If we find device random is too hard on the kernel, we can seed it and use another random
+ * number generator. */
+
+    /* it's fine and intentional: the cast will throw away the top 16 bits, and that's what we want. */
+ uint16_t random_num_a = (uint16_t)random_32_bit_num;
+ random_num_a = random_num_a % loop_count;
+
+ uint16_t random_num_b = (uint16_t)(random_32_bit_num >> 16);
+ random_num_b = random_num_b % loop_count;
+
+ struct aws_event_loop *random_loop_a = NULL;
+ struct aws_event_loop *random_loop_b = NULL;
+ aws_array_list_get_at(&el_group->event_loops, &random_loop_a, random_num_a);
+ aws_array_list_get_at(&el_group->event_loops, &random_loop_b, random_num_b);
- struct aws_event_loop *loop = NULL;
+ /* there's no logical reason why this should ever be possible. It's just best to die if it happens. */
+ AWS_FATAL_ASSERT((random_loop_a && random_loop_b) && "random_loop_a or random_loop_b is NULL.");
- /* if the fetch fails, we don't really care since loop will be NULL and error code will already be set. */
- aws_array_list_get_at(&el_group->event_loops, &loop, old_index);
- return loop;
+ size_t load_a = aws_event_loop_get_load_factor(random_loop_a);
+ size_t load_b = aws_event_loop_get_load_factor(random_loop_b);
+
+ return load_a < load_b ? random_loop_a : random_loop_b;
}
static void s_object_removed(void *value) {
@@ -203,6 +309,8 @@ int aws_event_loop_init_base(struct aws_event_loop *event_loop, struct aws_alloc
event_loop->alloc = alloc;
event_loop->clock = clock;
+ aws_atomic_init_int(&event_loop->current_load_factor, 0u);
+ aws_atomic_init_int(&event_loop->next_flush_time, 0u);
if (aws_hash_table_init(&event_loop->local_data, alloc, 20, aws_hash_ptr, aws_ptr_eq, NULL, s_object_removed)) {
return AWS_OP_ERR;
@@ -215,6 +323,52 @@ void aws_event_loop_clean_up_base(struct aws_event_loop *event_loop) {
aws_hash_table_clean_up(&event_loop->local_data);
}
+void aws_event_loop_register_tick_start(struct aws_event_loop *event_loop) {
+ aws_high_res_clock_get_ticks(&event_loop->latest_tick_start);
+}
+
+void aws_event_loop_register_tick_end(struct aws_event_loop *event_loop) {
+    /* increment the timestamp diff counter (this should always be called from the same thread); the concurrency
+     * work happens during the flush. */
+ uint64_t end_tick = 0;
+ aws_high_res_clock_get_ticks(&end_tick);
+
+ size_t elapsed = (size_t)aws_min_u64(end_tick - event_loop->latest_tick_start, SIZE_MAX);
+ event_loop->current_tick_latency_sum = aws_add_size_saturating(event_loop->current_tick_latency_sum, elapsed);
+ event_loop->latest_tick_start = 0;
+
+ size_t next_flush_time_secs = aws_atomic_load_int(&event_loop->next_flush_time);
+ /* store as seconds because we can't make a 64-bit integer reliably atomic across platforms. */
+ uint64_t end_tick_secs = aws_timestamp_convert(end_tick, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL);
+
+ /* if a second has passed, flush the load-factor. */
+ if (end_tick_secs > next_flush_time_secs) {
+ aws_atomic_store_int(&event_loop->current_load_factor, event_loop->current_tick_latency_sum);
+ event_loop->current_tick_latency_sum = 0;
+ /* run again in a second. */
+ aws_atomic_store_int(&event_loop->next_flush_time, (size_t)(end_tick_secs + 1));
+ }
+}
+
+size_t aws_event_loop_get_load_factor(struct aws_event_loop *event_loop) {
+ uint64_t current_time = 0;
+ aws_high_res_clock_get_ticks(&current_time);
+
+ uint64_t current_time_secs = aws_timestamp_convert(current_time, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL);
+ size_t next_flush_time_secs = aws_atomic_load_int(&event_loop->next_flush_time);
+
+    /* safety valve just in case an event-loop had heavy load and then went completely idle. If we haven't
+     * had an update from the event-loop in 10 seconds, just assume it is idle. Also, yes, this is racy, but it should
+     * be good enough: an active loop will update its counter frequently (more than once per 10 seconds, for sure).
+     * If we do hit the technical race condition, we don't care anyway; returning 0 is the desired behavior. */
+ if (current_time_secs > next_flush_time_secs + 10) {
+ return 0;
+ }
+
+ return aws_atomic_load_int(&event_loop->current_load_factor);
+}
+
void aws_event_loop_destroy(struct aws_event_loop *event_loop) {
if (!event_loop) {
return;
diff --git a/contrib/restricted/aws/aws-c-io/source/exponential_backoff_retry_strategy.c b/contrib/restricted/aws/aws-c-io/source/exponential_backoff_retry_strategy.c
index f064f2118c..22b53b2991 100644
--- a/contrib/restricted/aws/aws-c-io/source/exponential_backoff_retry_strategy.c
+++ b/contrib/restricted/aws/aws-c-io/source/exponential_backoff_retry_strategy.c
@@ -9,7 +9,6 @@
#include <aws/common/clock.h>
#include <aws/common/device_random.h>
-#include <aws/common/logging.h>
#include <aws/common/mutex.h>
#include <aws/common/task_scheduler.h>
@@ -27,7 +26,7 @@ struct exponential_backoff_retry_token {
size_t max_retries;
uint64_t backoff_scale_factor_ns;
enum aws_exponential_backoff_jitter_mode jitter_mode;
- /* Let's not make this worst by constantly moving across threads if we can help it */
+ /* Let's not make this worse by constantly moving across threads if we can help it */
struct aws_event_loop *bound_loop;
uint64_t (*generate_random)(void);
struct aws_task retry_task;
@@ -42,7 +41,10 @@ struct exponential_backoff_retry_token {
static void s_exponential_retry_destroy(struct aws_retry_strategy *retry_strategy) {
if (retry_strategy) {
- aws_mem_release(retry_strategy->allocator, retry_strategy);
+ struct exponential_backoff_strategy *exponential_strategy = retry_strategy->impl;
+ struct aws_event_loop_group *el_group = exponential_strategy->config.el_group;
+ aws_mem_release(retry_strategy->allocator, exponential_strategy);
+ aws_ref_count_release(&el_group->ref_count);
}
}
@@ -72,6 +74,7 @@ static void s_exponential_retry_task(struct aws_task *task, void *arg, enum aws_
!aws_mutex_unlock(&backoff_retry_token->thread_data.mutex) && "Retry token mutex release failed");
} /**** END CRITICAL SECTION ***********/
+ aws_retry_token_acquire(&backoff_retry_token->base);
if (acquired_fn) {
AWS_LOGF_DEBUG(
AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY,
@@ -86,7 +89,10 @@ static void s_exponential_retry_task(struct aws_task *task, void *arg, enum aws_
(void *)backoff_retry_token->base.retry_strategy,
(void *)&backoff_retry_token->base);
retry_ready_fn(&backoff_retry_token->base, error_code, user_data);
+            /* the token was acquired when the retry was scheduled; release that hold here */
+ aws_retry_token_release(&backoff_retry_token->base);
}
+ aws_retry_token_release(&backoff_retry_token->base);
}
static int s_exponential_retry_acquire_token(
@@ -114,6 +120,7 @@ static int s_exponential_retry_acquire_token(
backoff_retry_token->base.allocator = retry_strategy->allocator;
backoff_retry_token->base.retry_strategy = retry_strategy;
+ aws_atomic_init_int(&backoff_retry_token->base.ref_count, 1u);
aws_retry_strategy_acquire(retry_strategy);
backoff_retry_token->base.impl = backoff_retry_token;
@@ -222,7 +229,7 @@ static int s_exponential_retry_schedule_retry(
aws_event_loop_current_clock_time(backoff_retry_token->bound_loop, &current_time);
schedule_at = backoff + current_time;
aws_atomic_init_int(&backoff_retry_token->last_backoff, (size_t)backoff);
- aws_atomic_fetch_add(&backoff_retry_token->current_retry_count, 1);
+ aws_atomic_fetch_add(&backoff_retry_token->current_retry_count, 1u);
AWS_LOGF_DEBUG(
AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY,
"id=%p: Computed backoff value of %" PRIu64 "ns on token %p",
@@ -242,6 +249,8 @@ static int s_exponential_retry_schedule_retry(
} else {
backoff_retry_token->thread_data.retry_ready_fn = retry_ready;
backoff_retry_token->thread_data.user_data = user_data;
+ /* acquire to hold until the task runs. */
+ aws_retry_token_acquire(token);
aws_task_init(
&backoff_retry_token->retry_task,
s_exponential_retry_task,
@@ -329,13 +338,15 @@ struct aws_retry_strategy *aws_retry_strategy_new_exponential_backoff(
exponential_backoff_strategy->base.vtable = &s_exponential_retry_vtable;
aws_atomic_init_int(&exponential_backoff_strategy->base.ref_count, 1);
exponential_backoff_strategy->config = *config;
+ exponential_backoff_strategy->config.el_group =
+ aws_ref_count_acquire(&exponential_backoff_strategy->config.el_group->ref_count);
if (!exponential_backoff_strategy->config.generate_random) {
exponential_backoff_strategy->config.generate_random = s_default_gen_rand;
}
if (!exponential_backoff_strategy->config.max_retries) {
- exponential_backoff_strategy->config.max_retries = 10;
+ exponential_backoff_strategy->config.max_retries = 5;
}
if (!exponential_backoff_strategy->config.backoff_scale_factor_ms) {
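
The token ref-count changes above follow a strict pairing: aws_atomic_init_int(..., 1u) gives a fresh token one reference for the caller, s_exponential_retry_schedule_retry takes an extra hold before the task is scheduled, and s_exponential_retry_task drops that hold (plus a transient one around the user callback) once it runs. A minimal sketch of the same pairing in isolation, using only the aws_retry_token_acquire/aws_retry_token_release API named in the patch; both helper functions here are hypothetical:

    #include <aws/io/retry_strategy.h>

    /* Schedule side: whoever schedules the retry takes a hold on the token... */
    static void s_schedule_with_hold(struct aws_retry_token *token) {
        aws_retry_token_acquire(token); /* keep the token alive until the retry task runs */
        /* ... aws_task_init + schedule on the bound loop, as in the hunks above ... */
    }

    /* ...and the task that eventually runs gives that hold back. */
    static void s_retry_task_done(struct aws_retry_token *token) {
        aws_retry_token_release(token); /* matches the acquire at schedule time */
    }
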
diff --git a/contrib/restricted/aws/aws-c-io/source/file_utils_shared.c b/contrib/restricted/aws/aws-c-io/source/file_utils_shared.c
deleted file mode 100644
index 00a5f38800..0000000000
--- a/contrib/restricted/aws/aws-c-io/source/file_utils_shared.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/io/file_utils.h>
-
-#include <aws/common/environment.h>
-#include <aws/common/string.h>
-#include <aws/io/logging.h>
-
-#include <errno.h>
-#include <stdio.h>
-
-#ifdef _MSC_VER
-# pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */
-#endif /* _MSC_VER */
-
-int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename) {
- AWS_ZERO_STRUCT(*out_buf);
- FILE *fp = fopen(filename, "rb");
-
- if (fp) {
- if (fseek(fp, 0L, SEEK_END)) {
- AWS_LOGF_ERROR(AWS_LS_IO_FILE_UTILS, "static: Failed to seek file %s with errno %d", filename, errno);
- fclose(fp);
- return aws_translate_and_raise_io_error(errno);
- }
-
- size_t allocation_size = (size_t)ftell(fp) + 1;
- /* Tell the user that we allocate here and if success they're responsible for the free. */
- if (aws_byte_buf_init(out_buf, alloc, allocation_size)) {
- fclose(fp);
- return AWS_OP_ERR;
- }
-
- /* Ensure compatibility with null-terminated APIs, but don't consider
- * the null terminator part of the length of the payload */
- out_buf->len = out_buf->capacity - 1;
- out_buf->buffer[out_buf->len] = 0;
-
- if (fseek(fp, 0L, SEEK_SET)) {
- AWS_LOGF_ERROR(AWS_LS_IO_FILE_UTILS, "static: Failed to seek file %s with errno %d", filename, errno);
- aws_byte_buf_clean_up(out_buf);
- fclose(fp);
- return aws_translate_and_raise_io_error(errno);
- }
-
- size_t read = fread(out_buf->buffer, 1, out_buf->len, fp);
- fclose(fp);
- if (read < out_buf->len) {
- AWS_LOGF_ERROR(AWS_LS_IO_FILE_UTILS, "static: Failed to read file %s with errno %d", filename, errno);
- aws_secure_zero(out_buf->buffer, out_buf->len);
- aws_byte_buf_clean_up(out_buf);
- return aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE);
- }
-
- return AWS_OP_SUCCESS;
- }
-
- AWS_LOGF_ERROR(AWS_LS_IO_FILE_UTILS, "static: Failed to open file %s with errno %d", filename, errno);
-
- return aws_translate_and_raise_io_error(errno);
-}
-
-bool aws_is_any_directory_separator(char value) {
- return value == '\\' || value == '/';
-}
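
The deleted file removes aws-c-io's private copy of the file loader; in this revision the equivalent aws_byte_buf_init_from_file() ships with aws-c-common (aws/common/file.h), so callers only need to switch headers. A minimal usage sketch under that assumption:

    #include <aws/common/byte_buf.h>
    #include <aws/common/file.h> /* assumed new home of aws_byte_buf_init_from_file */

    static int s_load_file(struct aws_allocator *alloc, const char *path, struct aws_byte_buf *out) {
        /* on success the caller owns *out and must aws_byte_buf_clean_up() it,
         * the same contract as the deleted aws-c-io helper */
        return aws_byte_buf_init_from_file(out, alloc, path);
    }
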
diff --git a/contrib/restricted/aws/aws-c-io/source/host_resolver.c b/contrib/restricted/aws/aws-c-io/source/host_resolver.c
index 2df732a904..8cd3c2ba6b 100644
--- a/contrib/restricted/aws/aws-c-io/source/host_resolver.c
+++ b/contrib/restricted/aws/aws-c-io/source/host_resolver.c
@@ -148,6 +148,11 @@ struct default_host_resolver {
* callback.
*/
uint32_t pending_host_entry_shutdown_completion_callbacks;
+
+ /*
+ * Function to use to query current time. Overridable in construction options.
+ */
+ aws_io_clock_fn *system_clock_fn;
};
/* Default host resolver implementation for listener. */
@@ -161,6 +166,7 @@ struct host_listener {
/* User-supplied callbacks/user_data */
aws_host_listener_resolved_address_fn *resolved_address_callback;
+ aws_host_listener_expired_address_fn *expired_address_callback;
aws_host_listener_shutdown_fn *shutdown_callback;
void *user_data;
@@ -180,6 +186,7 @@ struct host_listener {
/* It's important that the node structure is always first, so that the HOST_LISTENER_FROM_THREADED_NODE macro
* works properly.*/
struct aws_linked_list_node node;
+ bool pin_host_entry;
} threaded_data;
};
@@ -219,8 +226,33 @@ struct host_entry {
uint32_t resolves_since_last_request;
uint64_t last_resolve_request_timestamp_ns;
enum default_resolver_state state;
+ struct aws_array_list new_addresses;
+ struct aws_array_list expired_addresses;
+};
+
+/*
+ * A host entry's caches hold things of this type. By using this and not the host_address directly, our
+ * on_remove callbacks for the cache have access to the host_entry. We wouldn't need to do this if those
+ * callbacks supported user data injection, but they don't and too many internal code bases already depend
+ * on the public API.
+ */
+struct aws_host_address_cache_entry {
+ struct aws_host_address address;
+ struct host_entry *entry;
};
+int aws_host_address_cache_entry_copy(
+ const struct aws_host_address_cache_entry *from,
+ struct aws_host_address_cache_entry *to) {
+ if (aws_host_address_copy(&from->address, &to->address)) {
+ return AWS_OP_ERR;
+ }
+
+ to->entry = from->entry;
+
+ return AWS_OP_SUCCESS;
+}
+
static void s_shutdown_host_entry(struct host_entry *entry) {
aws_mutex_lock(&entry->entry_lock);
entry->state = DRS_SHUTTING_DOWN;
@@ -294,8 +326,6 @@ static void s_cleanup_default_resolver(struct aws_host_resolver *resolver) {
if (shutdown_callback != NULL) {
shutdown_callback(shutdown_completion_user_data);
}
-
- aws_global_thread_creator_decrement();
}
static void resolver_destroy(struct aws_host_resolver *resolver) {
@@ -325,6 +355,16 @@ struct pending_callback {
struct aws_linked_list_node node;
};
+static void s_clear_address_list(struct aws_array_list *address_list) {
+ for (size_t i = 0; i < aws_array_list_length(address_list); ++i) {
+ struct aws_host_address *address = NULL;
+ aws_array_list_get_at_ptr(address_list, (void **)&address, i);
+ aws_host_address_clean_up(address);
+ }
+
+ aws_array_list_clear(address_list);
+}
+
static void s_clean_up_host_entry(struct host_entry *entry) {
if (entry == NULL) {
return;
@@ -357,6 +397,13 @@ static void s_clean_up_host_entry(struct host_entry *entry) {
aws_cache_destroy(entry->failed_connection_a_records);
aws_cache_destroy(entry->failed_connection_aaaa_records);
aws_string_destroy((void *)entry->host_name);
+
+ s_clear_address_list(&entry->new_addresses);
+ aws_array_list_clean_up(&entry->new_addresses);
+
+ s_clear_address_list(&entry->expired_addresses);
+ aws_array_list_clean_up(&entry->expired_addresses);
+
aws_mem_release(entry->allocator, entry);
}
@@ -382,14 +429,56 @@ static void s_on_host_entry_shutdown_completion(void *user_data) {
}
}
+static int s_copy_address_into_array_list(struct aws_host_address *address, struct aws_array_list *address_list) {
+
+ /*
+ * This is the worst.
+ *
+ * We have to copy the cache address while we still have a write lock. Otherwise, connection failures
+ * can sneak in and destroy our address by moving the address to/from the various lru caches.
+ *
+ * But there's no nice copy construction into an array list, so we get to
+ * (1) Push a zeroed dummy element onto the array list
+ * (2) Get its pointer
+ * (3) Call aws_host_address_copy onto it. If that fails, pop the dummy element.
+ */
+ struct aws_host_address dummy;
+ AWS_ZERO_STRUCT(dummy);
+
+ if (aws_array_list_push_back(address_list, &dummy)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_host_address *dest_copy = NULL;
+ aws_array_list_get_at_ptr(address_list, (void **)&dest_copy, aws_array_list_length(address_list) - 1);
+ AWS_FATAL_ASSERT(dest_copy != NULL);
+
+ if (aws_host_address_copy(address, dest_copy)) {
+ aws_array_list_pop_back(address_list);
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static uint64_t s_get_system_time_for_default_resolver(struct aws_host_resolver *resolver) {
+ struct default_host_resolver *default_resolver = resolver->impl;
+
+ uint64_t timestamp = 0;
+ (*default_resolver->system_clock_fn)(&timestamp);
+
+ return timestamp;
+}
+
/* this only ever gets called after resolution has already run. We expect that the entry's lock
has been acquired for writing before this function is called and released afterwards. */
static inline void process_records(
- struct aws_allocator *allocator,
+ struct host_entry *host_entry,
struct aws_cache *records,
struct aws_cache *failed_records) {
- uint64_t timestamp = 0;
- aws_sys_clock_get_ticks(&timestamp);
+
+ struct aws_host_resolver *resolver = host_entry->resolver;
+ uint64_t timestamp = s_get_system_time_for_default_resolver(resolver);
size_t record_count = aws_cache_get_element_count(records);
size_t expired_records = 0;
@@ -397,16 +486,17 @@ static inline void process_records(
/* since this only ever gets called after resolution has already run, we're in a dns outage
* if everything is expired. Leave an element so we can keep trying. */
for (size_t index = 0; index < record_count && expired_records < record_count - 1; ++index) {
- struct aws_host_address *lru_element = aws_lru_cache_use_lru_element(records);
+ struct aws_host_address_cache_entry *lru_element_entry = aws_lru_cache_use_lru_element(records);
- if (lru_element->expiry < timestamp) {
+ if (lru_element_entry->address.expiry < timestamp) {
AWS_LOGF_DEBUG(
AWS_LS_IO_DNS,
"static: purging expired record %s for %s",
- lru_element->address->bytes,
- lru_element->host->bytes);
+ lru_element_entry->address.address->bytes,
+ lru_element_entry->address.host->bytes);
expired_records++;
- aws_cache_remove(records, lru_element->address);
+
+ aws_cache_remove(records, lru_element_entry->address.address);
}
}
@@ -420,30 +510,39 @@ static inline void process_records(
if (!record_count) {
size_t failed_count = aws_cache_get_element_count(failed_records);
for (size_t index = 0; index < failed_count; ++index) {
- struct aws_host_address *lru_element = aws_lru_cache_use_lru_element(failed_records);
-
- if (timestamp < lru_element->expiry) {
- struct aws_host_address *to_add = aws_mem_acquire(allocator, sizeof(struct aws_host_address));
-
- if (to_add && !aws_host_address_copy(lru_element, to_add)) {
- AWS_LOGF_INFO(
- AWS_LS_IO_DNS,
- "static: promoting spotty record %s for %s back to good list",
- lru_element->address->bytes,
- lru_element->host->bytes);
- if (aws_cache_put(records, to_add->address, to_add)) {
- aws_mem_release(allocator, to_add);
- continue;
- }
- /* we only want to promote one per process run.*/
- aws_cache_remove(failed_records, lru_element->address);
- break;
- }
-
- if (to_add) {
- aws_mem_release(allocator, to_add);
- }
+ struct aws_host_address_cache_entry *lru_element_entry = aws_lru_cache_use_lru_element(failed_records);
+ if (timestamp >= lru_element_entry->address.expiry) {
+ continue;
+ }
+
+ struct aws_host_address_cache_entry *to_add =
+ aws_mem_calloc(host_entry->allocator, 1, sizeof(struct aws_host_address_cache_entry));
+ if (to_add == NULL) {
+ continue;
+ }
+
+ if (aws_host_address_cache_entry_copy(lru_element_entry, to_add) ||
+ aws_cache_put(records, to_add->address.address, to_add)) {
+ aws_host_address_clean_up(&to_add->address);
+ aws_mem_release(host_entry->allocator, to_add);
+ continue;
}
+
+ /*
+ * Promoting an address from failed to good should trigger the new address callback
+ */
+ s_copy_address_into_array_list(&lru_element_entry->address, &host_entry->new_addresses);
+
+ AWS_LOGF_INFO(
+ AWS_LS_IO_DNS,
+ "static: promoting spotty record %s for %s back to good list",
+ lru_element_entry->address.address->bytes,
+ lru_element_entry->address.host->bytes);
+
+ aws_cache_remove(failed_records, lru_element_entry->address.address);
+
+            /* we only want to promote one address per process_records run. */
+ break;
}
}
}
@@ -473,7 +572,7 @@ static int resolver_record_connection_failure(struct aws_host_resolver *resolver
}
if (host_entry) {
- struct aws_host_address *cached_address = NULL;
+ struct aws_host_address_cache_entry *cached_address_entry = NULL;
aws_mutex_lock(&host_entry->entry_lock);
aws_mutex_unlock(&default_host_resolver->resolver_lock);
@@ -484,41 +583,45 @@ static int resolver_record_connection_failure(struct aws_host_resolver *resolver
? host_entry->failed_connection_aaaa_records
: host_entry->failed_connection_a_records;
- aws_cache_find(address_table, address->address, (void **)&cached_address);
+ aws_cache_find(address_table, address->address, (void **)&cached_address_entry);
- struct aws_host_address *address_copy = NULL;
- if (cached_address) {
- address_copy = aws_mem_acquire(resolver->allocator, sizeof(struct aws_host_address));
+ struct aws_host_address_cache_entry *address_entry_copy = NULL;
+ if (cached_address_entry) {
+ address_entry_copy = aws_mem_calloc(resolver->allocator, 1, sizeof(struct aws_host_address_cache_entry));
- if (!address_copy || aws_host_address_copy(cached_address, address_copy)) {
+ if (!address_entry_copy || aws_host_address_cache_entry_copy(cached_address_entry, address_entry_copy)) {
goto error_host_entry_cleanup;
}
- if (aws_cache_remove(address_table, cached_address->address)) {
+ /*
+ * This will trigger an expiration callback since the good caches add the removed address to the
+ * host_entry's expired list, via the cache's on_delete callback
+ */
+ if (aws_cache_remove(address_table, cached_address_entry->address.address)) {
goto error_host_entry_cleanup;
}
- address_copy->connection_failure_count += 1;
+ address_entry_copy->address.connection_failure_count += 1;
- if (aws_cache_put(failed_table, address_copy->address, address_copy)) {
+ if (aws_cache_put(failed_table, address_entry_copy->address.address, address_entry_copy)) {
goto error_host_entry_cleanup;
}
} else {
- if (aws_cache_find(failed_table, address->address, (void **)&cached_address)) {
+ if (aws_cache_find(failed_table, address->address, (void **)&cached_address_entry)) {
goto error_host_entry_cleanup;
}
- if (cached_address) {
- cached_address->connection_failure_count += 1;
+ if (cached_address_entry) {
+ cached_address_entry->address.connection_failure_count += 1;
}
}
aws_mutex_unlock(&host_entry->entry_lock);
return AWS_OP_SUCCESS;
error_host_entry_cleanup:
- if (address_copy) {
- aws_host_address_clean_up(address_copy);
- aws_mem_release(resolver->allocator, address_copy);
+ if (address_entry_copy) {
+ aws_host_address_clean_up(&address_entry_copy->address);
+ aws_mem_release(resolver->allocator, address_entry_copy);
}
aws_mutex_unlock(&host_entry->entry_lock);
return AWS_OP_ERR;
@@ -533,12 +636,12 @@ static int resolver_record_connection_failure(struct aws_host_resolver *resolver
* A bunch of convenience functions for the host resolver background thread function
*/
-static struct aws_host_address *s_find_cached_address_aux(
+static struct aws_host_address_cache_entry *s_find_cached_address_entry_aux(
struct aws_cache *primary_records,
struct aws_cache *fallback_records,
const struct aws_string *address) {
- struct aws_host_address *found = NULL;
+ struct aws_host_address_cache_entry *found = NULL;
aws_cache_find(primary_records, address, (void **)&found);
if (found == NULL) {
aws_cache_find(fallback_records, address, (void **)&found);
@@ -550,97 +653,89 @@ static struct aws_host_address *s_find_cached_address_aux(
/*
* Looks in both the good and failed connection record sets for a given host record
*/
-static struct aws_host_address *s_find_cached_address(
+static struct aws_host_address_cache_entry *s_find_cached_address_entry(
struct host_entry *entry,
const struct aws_string *address,
enum aws_address_record_type record_type) {
switch (record_type) {
case AWS_ADDRESS_RECORD_TYPE_AAAA:
- return s_find_cached_address_aux(entry->aaaa_records, entry->failed_connection_aaaa_records, address);
+ return s_find_cached_address_entry_aux(entry->aaaa_records, entry->failed_connection_aaaa_records, address);
case AWS_ADDRESS_RECORD_TYPE_A:
- return s_find_cached_address_aux(entry->a_records, entry->failed_connection_a_records, address);
+ return s_find_cached_address_entry_aux(entry->a_records, entry->failed_connection_a_records, address);
default:
return NULL;
}
}
-static struct aws_host_address *s_get_lru_address_aux(
+static struct aws_host_address_cache_entry *s_get_lru_address_entry_aux(
struct aws_cache *primary_records,
struct aws_cache *fallback_records) {
- struct aws_host_address *address = aws_lru_cache_use_lru_element(primary_records);
- if (address == NULL) {
+ struct aws_host_address_cache_entry *address_entry = aws_lru_cache_use_lru_element(primary_records);
+ if (address_entry == NULL) {
aws_lru_cache_use_lru_element(fallback_records);
}
- return address;
+ return address_entry;
}
/*
* Looks in both the good and failed connection record sets for the LRU host record
*/
-static struct aws_host_address *s_get_lru_address(struct host_entry *entry, enum aws_address_record_type record_type) {
+static struct aws_host_address_cache_entry *s_get_lru_address(
+ struct host_entry *entry,
+ enum aws_address_record_type record_type) {
switch (record_type) {
case AWS_ADDRESS_RECORD_TYPE_AAAA:
- return s_get_lru_address_aux(entry->aaaa_records, entry->failed_connection_aaaa_records);
+ return s_get_lru_address_entry_aux(entry->aaaa_records, entry->failed_connection_aaaa_records);
case AWS_ADDRESS_RECORD_TYPE_A:
- return s_get_lru_address_aux(entry->a_records, entry->failed_connection_a_records);
+ return s_get_lru_address_entry_aux(entry->a_records, entry->failed_connection_a_records);
default:
return NULL;
}
}
-static void s_clear_address_list(struct aws_array_list *address_list) {
- for (size_t i = 0; i < aws_array_list_length(address_list); ++i) {
- struct aws_host_address *address = NULL;
- aws_array_list_get_at_ptr(address_list, (void **)&address, i);
- aws_host_address_clean_up(address);
- }
-
- aws_array_list_clear(address_list);
-}
-
static void s_update_address_cache(
struct host_entry *host_entry,
struct aws_array_list *address_list,
- uint64_t new_expiration,
- struct aws_array_list *out_new_address_list) {
+ uint64_t new_expiration) {
AWS_PRECONDITION(host_entry);
AWS_PRECONDITION(address_list);
- AWS_PRECONDITION(out_new_address_list);
for (size_t i = 0; i < aws_array_list_length(address_list); ++i) {
struct aws_host_address *fresh_resolved_address = NULL;
aws_array_list_get_at_ptr(address_list, (void **)&fresh_resolved_address, i);
- struct aws_host_address *address_to_cache =
- s_find_cached_address(host_entry, fresh_resolved_address->address, fresh_resolved_address->record_type);
+ struct aws_host_address_cache_entry *address_to_cache_entry = s_find_cached_address_entry(
+ host_entry, fresh_resolved_address->address, fresh_resolved_address->record_type);
- if (address_to_cache) {
- address_to_cache->expiry = new_expiration;
+ if (address_to_cache_entry) {
+ address_to_cache_entry->address.expiry = new_expiration;
AWS_LOGF_TRACE(
AWS_LS_IO_DNS,
"static: updating expiry for %s for host %s to %llu",
- address_to_cache->address->bytes,
+ address_to_cache_entry->address.address->bytes,
host_entry->host_name->bytes,
(unsigned long long)new_expiration);
} else {
- address_to_cache = aws_mem_acquire(host_entry->allocator, sizeof(struct aws_host_address));
+ address_to_cache_entry =
+ aws_mem_calloc(host_entry->allocator, 1, sizeof(struct aws_host_address_cache_entry));
- aws_host_address_move(fresh_resolved_address, address_to_cache);
- address_to_cache->expiry = new_expiration;
+ aws_host_address_move(fresh_resolved_address, &address_to_cache_entry->address);
+ address_to_cache_entry->address.expiry = new_expiration;
+ address_to_cache_entry->entry = host_entry;
- struct aws_cache *address_table = address_to_cache->record_type == AWS_ADDRESS_RECORD_TYPE_AAAA
- ? host_entry->aaaa_records
- : host_entry->a_records;
+ struct aws_cache *address_table =
+ address_to_cache_entry->address.record_type == AWS_ADDRESS_RECORD_TYPE_AAAA ? host_entry->aaaa_records
+ : host_entry->a_records;
- if (aws_cache_put(address_table, address_to_cache->address, address_to_cache)) {
+ if (aws_cache_put(address_table, address_to_cache_entry->address.address, address_to_cache_entry)) {
AWS_LOGF_ERROR(
AWS_LS_IO_DNS,
"static: could not add new address to host entry cache for host '%s' in "
@@ -653,12 +748,12 @@ static void s_update_address_cache(
AWS_LOGF_DEBUG(
AWS_LS_IO_DNS,
"static: new address resolved %s for host %s caching",
- address_to_cache->address->bytes,
+ address_to_cache_entry->address.address->bytes,
host_entry->host_name->bytes);
struct aws_host_address new_address_copy;
- if (aws_host_address_copy(address_to_cache, &new_address_copy)) {
+ if (aws_host_address_copy(&address_to_cache_entry->address, &new_address_copy)) {
AWS_LOGF_ERROR(
AWS_LS_IO_DNS,
"static: could not copy address for new-address list for host '%s' in s_update_address_cache.",
@@ -667,7 +762,7 @@ static void s_update_address_cache(
continue;
}
- if (aws_array_list_push_back(out_new_address_list, &new_address_copy)) {
+ if (aws_array_list_push_back(&host_entry->new_addresses, &new_address_copy)) {
aws_host_address_clean_up(&new_address_copy);
AWS_LOGF_ERROR(
@@ -682,45 +777,26 @@ static void s_update_address_cache(
}
static void s_copy_address_into_callback_set(
- struct aws_host_address *address,
+ struct aws_host_address_cache_entry *entry,
struct aws_array_list *callback_addresses,
const struct aws_string *host_name) {
- if (address) {
- address->use_count += 1;
-
- /*
- * This is the worst.
- *
- * We have to copy the cache address while we still have a write lock. Otherwise, connection failures
- * can sneak in and destroy our address by moving the address to/from the various lru caches.
- *
- * But there's no nice copy construction into an array list, so we get to
- * (1) Push a zeroed dummy element onto the array list
- * (2) Get its pointer
- * (3) Call aws_host_address_copy onto it. If that fails, pop the dummy element.
- */
- struct aws_host_address dummy;
- AWS_ZERO_STRUCT(dummy);
-
- if (aws_array_list_push_back(callback_addresses, &dummy)) {
+ if (entry != NULL) {
+ if (s_copy_address_into_array_list(&entry->address, callback_addresses)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_DNS,
+ "static: failed to vend address %s for host %s to caller",
+ entry->address.address->bytes,
+ host_name->bytes);
return;
}
- struct aws_host_address *dest_copy = NULL;
- aws_array_list_get_at_ptr(
- callback_addresses, (void **)&dest_copy, aws_array_list_length(callback_addresses) - 1);
- AWS_FATAL_ASSERT(dest_copy != NULL);
-
- if (aws_host_address_copy(address, dest_copy)) {
- aws_array_list_pop_back(callback_addresses);
- return;
- }
+ entry->address.use_count += 1;
AWS_LOGF_TRACE(
AWS_LS_IO_DNS,
"static: vending address %s for host %s to caller",
- address->address->bytes,
+ entry->address.address->bytes,
host_name->bytes);
}
}
@@ -842,11 +918,13 @@ static void s_resolver_thread_destroy_listeners(struct aws_linked_list *listener
/* Assumes no lock is held. The listener_list is owned by the resolver thread, so no lock is necessary. We also don't
* want a lock held when calling the resolver-address callback.*/
static void s_resolver_thread_notify_listeners(
+ struct aws_linked_list *listener_list,
const struct aws_array_list *new_address_list,
- struct aws_linked_list *listener_list) {
+ const struct aws_array_list *expired_address_list) {
AWS_PRECONDITION(new_address_list);
AWS_PRECONDITION(listener_list);
+ AWS_PRECONDITION(expired_address_list);
/* Go through each listener in our list. */
for (struct aws_linked_list_node *listener_node = aws_linked_list_begin(listener_list);
@@ -854,14 +932,35 @@ static void s_resolver_thread_notify_listeners(
listener_node = aws_linked_list_next(listener_node)) {
struct host_listener *listener = HOST_LISTENER_FROM_THREADED_NODE(listener_node);
- /* If we have new adddresses, notify the resolved-address callback if one exists */
+ /* If we have new addresses, notify the resolved-address callback if one exists */
if (aws_array_list_length(new_address_list) > 0 && listener->resolved_address_callback != NULL) {
listener->resolved_address_callback(
(struct aws_host_listener *)listener, new_address_list, listener->user_data);
}
+
+ /* If we have expired addresses, notify the expired-address callback if one exists */
+ if (aws_array_list_length(expired_address_list) > 0 && listener->expired_address_callback != NULL) {
+ listener->expired_address_callback(
+ (struct aws_host_listener *)listener, expired_address_list, listener->user_data);
+ }
}
}
+static bool s_is_host_entry_pinned_by_listener(struct aws_linked_list *listener_list) {
+ AWS_PRECONDITION(listener_list);
+
+ for (struct aws_linked_list_node *listener_node = aws_linked_list_begin(listener_list);
+ listener_node != aws_linked_list_end(listener_list);
+ listener_node = aws_linked_list_next(listener_node)) {
+ struct host_listener *listener = HOST_LISTENER_FROM_THREADED_NODE(listener_node);
+ if (listener->threaded_data.pin_host_entry) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
static void resolver_thread_fn(void *arg) {
struct host_entry *host_entry = arg;
@@ -873,28 +972,35 @@ static void resolver_thread_fn(void *arg) {
uint64_t max_no_solicitation_interval =
aws_timestamp_convert(unsolicited_resolve_max, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL);
+ struct aws_linked_list listener_list;
+ aws_linked_list_init(&listener_list);
+
+ struct aws_linked_list listener_destroy_list;
+ aws_linked_list_init(&listener_destroy_list);
+
+ bool keep_going = true;
+
struct aws_array_list address_list;
+ AWS_ZERO_STRUCT(address_list);
+ struct aws_array_list new_address_list;
+ AWS_ZERO_STRUCT(new_address_list);
+ struct aws_array_list expired_address_list;
+ AWS_ZERO_STRUCT(expired_address_list);
+
if (aws_array_list_init_dynamic(&address_list, host_entry->allocator, 4, sizeof(struct aws_host_address))) {
- return;
+ goto done;
}
- struct aws_array_list new_address_list;
if (aws_array_list_init_dynamic(&new_address_list, host_entry->allocator, 4, sizeof(struct aws_host_address))) {
- aws_array_list_clean_up(&address_list);
- return;
+ goto done;
}
- struct aws_linked_list listener_list;
- aws_linked_list_init(&listener_list);
-
- struct aws_linked_list listener_destroy_list;
- aws_linked_list_init(&listener_destroy_list);
+ if (aws_array_list_init_dynamic(&expired_address_list, host_entry->allocator, 4, sizeof(struct aws_host_address))) {
+ goto done;
+ }
- bool keep_going = true;
while (keep_going) {
- AWS_LOGF_TRACE(AWS_LS_IO_DNS, "static, resolving %s", aws_string_c_str(host_entry->host_name));
-
/* resolve and then process each record */
int err_code = AWS_ERROR_SUCCESS;
if (host_entry->resolution_config.impl(
@@ -902,8 +1008,23 @@ static void resolver_thread_fn(void *arg) {
err_code = aws_last_error();
}
- uint64_t timestamp = 0;
- aws_sys_clock_get_ticks(&timestamp);
+
+ if (err_code == AWS_ERROR_SUCCESS) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_DNS,
+ "static, resolving host %s successful, returned %d addresses",
+ aws_string_c_str(host_entry->host_name),
+ (int)aws_array_list_length(&address_list));
+ } else {
+ AWS_LOGF_WARN(
+ AWS_LS_IO_DNS,
+ "static, resolving host %s failed, ec %d (%s)",
+ aws_string_c_str(host_entry->host_name),
+ err_code,
+ aws_error_debug_str(err_code));
+ }
+
+ uint64_t timestamp = s_get_system_time_for_default_resolver(host_entry->resolver);
uint64_t new_expiry = timestamp + (host_entry->resolution_config.max_ttl * NS_PER_SEC);
struct aws_linked_list pending_resolve_copy;
@@ -918,15 +1039,15 @@ static void resolver_thread_fn(void *arg) {
aws_mutex_lock(&host_entry->entry_lock);
if (!err_code) {
- s_update_address_cache(host_entry, &address_list, new_expiry, &new_address_list);
+ s_update_address_cache(host_entry, &address_list, new_expiry);
}
/*
* process and clean_up records in the entry. occasionally, failed connect records will be upgraded
* for retry.
*/
- process_records(host_entry->allocator, host_entry->aaaa_records, host_entry->failed_connection_aaaa_records);
- process_records(host_entry->allocator, host_entry->a_records, host_entry->failed_connection_a_records);
+ process_records(host_entry, host_entry->aaaa_records, host_entry->failed_connection_aaaa_records);
+ process_records(host_entry, host_entry->a_records, host_entry->failed_connection_a_records);
aws_linked_list_swap_contents(&pending_resolve_copy, &host_entry->pending_resolution_callbacks);
@@ -962,9 +1083,21 @@ static void resolver_thread_fn(void *arg) {
host_entry->host_name);
aws_mutex_unlock(&host_entry->entry_lock);
- AWS_ASSERT(err_code != AWS_ERROR_SUCCESS || aws_array_list_length(&callback_address_list) > 0);
+ size_t callback_address_list_size = aws_array_list_length(&callback_address_list);
+ if (callback_address_list_size > 0) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_DNS,
+ "static, invoking resolution callback for host %s with %d addresses",
+ aws_string_c_str(host_entry->host_name),
+ (int)callback_address_list_size);
+ } else {
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_DNS,
+ "static, invoking resolution callback for host %s with failure",
+ aws_string_c_str(host_entry->host_name));
+ }
- if (aws_array_list_length(&callback_address_list) > 0) {
+ if (callback_address_list_size > 0) {
pending_callback->callback(
host_entry->resolver,
host_entry->host_name,
@@ -973,8 +1106,9 @@ static void resolver_thread_fn(void *arg) {
pending_callback->user_data);
} else {
+ int error_code = (err_code != AWS_ERROR_SUCCESS) ? err_code : AWS_IO_DNS_QUERY_FAILED;
pending_callback->callback(
- host_entry->resolver, host_entry->host_name, err_code, NULL, pending_callback->user_data);
+ host_entry->resolver, host_entry->host_name, error_code, NULL, pending_callback->user_data);
}
s_clear_address_list(&callback_address_list);
@@ -1014,8 +1148,8 @@ static void resolver_thread_fn(void *arg) {
aws_mutex_lock(&host_entry->entry_lock);
- uint64_t now = 0;
- aws_sys_clock_get_ticks(&now);
+ uint64_t now = s_get_system_time_for_default_resolver(host_entry->resolver);
+ bool pinned = s_is_host_entry_pinned_by_listener(&listener_list);
/*
* Ideally this should just be time-based, but given the non-determinism of waits (and spurious wake ups) and
@@ -1031,7 +1165,7 @@ static void resolver_thread_fn(void *arg) {
* final clean up of this entry.
*/
if (host_entry->resolves_since_last_request > unsolicited_resolve_max &&
- host_entry->last_resolve_request_timestamp_ns + max_no_solicitation_interval < now) {
+ host_entry->last_resolve_request_timestamp_ns + max_no_solicitation_interval < now && !pinned) {
host_entry->state = DRS_SHUTTING_DOWN;
}
@@ -1045,6 +1179,9 @@ static void resolver_thread_fn(void *arg) {
}
}
+ aws_array_list_swap_contents(&host_entry->new_addresses, &new_address_list);
+ aws_array_list_swap_contents(&host_entry->expired_addresses, &expired_address_list);
+
aws_mutex_unlock(&host_entry->entry_lock);
aws_mutex_unlock(&resolver->resolver_lock);
@@ -1052,9 +1189,10 @@ static void resolver_thread_fn(void *arg) {
s_resolver_thread_destroy_listeners(&listener_destroy_list);
/* Notify our local listeners of new addresses. */
- s_resolver_thread_notify_listeners(&new_address_list, &listener_list);
+ s_resolver_thread_notify_listeners(&listener_list, &new_address_list, &expired_address_list);
s_clear_address_list(&new_address_list);
+ s_clear_address_list(&expired_address_list);
}
AWS_LOGF_DEBUG(
@@ -1063,26 +1201,48 @@ static void resolver_thread_fn(void *arg) {
"of the ttl, or this thread is being forcibly shutdown. Killing thread.",
host_entry->host_name->bytes)
+done:
+
+ AWS_FATAL_ASSERT(aws_array_list_length(&address_list) == 0);
+ AWS_FATAL_ASSERT(aws_array_list_length(&new_address_list) == 0);
+ AWS_FATAL_ASSERT(aws_array_list_length(&expired_address_list) == 0);
+
aws_array_list_clean_up(&address_list);
aws_array_list_clean_up(&new_address_list);
+ aws_array_list_clean_up(&expired_address_list);
/* please don't fail */
aws_thread_current_at_exit(s_on_host_entry_shutdown_completion, host_entry);
}
-static void on_address_value_removed(void *value) {
- struct aws_host_address *host_address = value;
-
+static void on_cache_entry_removed_helper(struct aws_host_address_cache_entry *entry) {
AWS_LOGF_DEBUG(
AWS_LS_IO_DNS,
"static: purging address %s for host %s from "
"the cache due to cache eviction or shutdown",
- host_address->address->bytes,
- host_address->host->bytes);
+ entry->address.address->bytes,
+ entry->address.host->bytes);
+
+ struct aws_allocator *allocator = entry->address.allocator;
+ aws_host_address_clean_up(&entry->address);
+ aws_mem_release(allocator, entry);
+}
+
+static void on_good_address_entry_removed(void *value) {
+ struct aws_host_address_cache_entry *entry = value;
+ if (entry == NULL) {
+ return;
+ }
- struct aws_allocator *allocator = host_address->allocator;
- aws_host_address_clean_up(host_address);
- aws_mem_release(allocator, host_address);
+ s_copy_address_into_array_list(&entry->address, &entry->entry->expired_addresses);
+
+ on_cache_entry_removed_helper(entry);
+}
+
+static void on_failed_address_entry_removed(void *value) {
+ struct aws_host_address_cache_entry *entry = value;
+
+ on_cache_entry_removed_helper(entry);
}
/*
@@ -1120,7 +1280,7 @@ static inline int create_and_init_host_entry(
aws_hash_string,
aws_hash_callback_string_eq,
NULL,
- on_address_value_removed,
+ on_good_address_entry_removed,
config->max_ttl);
if (AWS_UNLIKELY(!new_host_entry->a_records)) {
goto setup_host_entry_error;
@@ -1131,7 +1291,7 @@ static inline int create_and_init_host_entry(
aws_hash_string,
aws_hash_callback_string_eq,
NULL,
- on_address_value_removed,
+ on_good_address_entry_removed,
config->max_ttl);
if (AWS_UNLIKELY(!new_host_entry->aaaa_records)) {
goto setup_host_entry_error;
@@ -1142,7 +1302,7 @@ static inline int create_and_init_host_entry(
aws_hash_string,
aws_hash_callback_string_eq,
NULL,
- on_address_value_removed,
+ on_failed_address_entry_removed,
config->max_ttl);
if (AWS_UNLIKELY(!new_host_entry->failed_connection_a_records)) {
goto setup_host_entry_error;
@@ -1153,12 +1313,22 @@ static inline int create_and_init_host_entry(
aws_hash_string,
aws_hash_callback_string_eq,
NULL,
- on_address_value_removed,
+ on_failed_address_entry_removed,
config->max_ttl);
if (AWS_UNLIKELY(!new_host_entry->failed_connection_aaaa_records)) {
goto setup_host_entry_error;
}
+ if (aws_array_list_init_dynamic(
+ &new_host_entry->new_addresses, new_host_entry->allocator, 4, sizeof(struct aws_host_address))) {
+ goto setup_host_entry_error;
+ }
+
+ if (aws_array_list_init_dynamic(
+ &new_host_entry->expired_addresses, new_host_entry->allocator, 4, sizeof(struct aws_host_address))) {
+ goto setup_host_entry_error;
+ }
+
aws_linked_list_init(&new_host_entry->pending_resolution_callbacks);
pending_callback = aws_mem_acquire(resolver->allocator, sizeof(struct pending_callback));
@@ -1187,7 +1357,10 @@ static inline int create_and_init_host_entry(
goto setup_host_entry_error;
}
- aws_thread_launch(&new_host_entry->resolver_thread, resolver_thread_fn, new_host_entry, NULL);
+ struct aws_thread_options thread_options = *aws_default_thread_options();
+ thread_options.join_strategy = AWS_TJS_MANAGED;
+
+ aws_thread_launch(&new_host_entry->resolver_thread, resolver_thread_fn, new_host_entry, &thread_options);
++default_host_resolver->pending_host_entry_shutdown_completion_callbacks;
return AWS_OP_SUCCESS;
@@ -1213,8 +1386,7 @@ static int default_resolve_host(
AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "id=%p: Host resolution requested for %s", (void *)resolver, host_name->bytes);
- uint64_t timestamp = 0;
- aws_sys_clock_get_ticks(&timestamp);
+ uint64_t timestamp = s_get_system_time_for_default_resolver(resolver);
struct default_host_resolver *default_host_resolver = resolver->impl;
aws_mutex_lock(&default_host_resolver->resolver_lock);
@@ -1253,8 +1425,11 @@ static int default_resolve_host(
host_entry->last_resolve_request_timestamp_ns = timestamp;
host_entry->resolves_since_last_request = 0;
- struct aws_host_address *aaaa_record = aws_lru_cache_use_lru_element(host_entry->aaaa_records);
- struct aws_host_address *a_record = aws_lru_cache_use_lru_element(host_entry->a_records);
+ struct aws_host_address_cache_entry *aaaa_entry = aws_lru_cache_use_lru_element(host_entry->aaaa_records);
+ struct aws_host_address *aaaa_record = (aaaa_entry != NULL) ? &aaaa_entry->address : NULL;
+ struct aws_host_address_cache_entry *a_entry = aws_lru_cache_use_lru_element(host_entry->a_records);
+ struct aws_host_address *a_record = (a_entry != NULL) ? &a_entry->address : NULL;
+
struct aws_host_address address_array[2];
AWS_ZERO_ARRAY(address_array);
struct aws_array_list callback_address_list;
@@ -1378,14 +1553,13 @@ static void s_aws_host_resolver_destroy(struct aws_host_resolver *resolver) {
struct aws_host_resolver *aws_host_resolver_new_default(
struct aws_allocator *allocator,
- size_t max_entries,
- struct aws_event_loop_group *el_group,
- const struct aws_shutdown_callback_options *shutdown_options) {
+ struct aws_host_resolver_default_options *options) {
+ AWS_FATAL_ASSERT(options != NULL);
+
/* NOTE: we don't use el_group yet, but we will in the future. Also, we
don't want host resolvers getting cleaned up after el_groups; this will force that
in bindings, and encourage it in C land. */
- (void)el_group;
- AWS_ASSERT(el_group);
+ AWS_ASSERT(options->el_group);
struct aws_host_resolver *resolver = NULL;
struct default_host_resolver *default_host_resolver = NULL;
@@ -1406,7 +1580,7 @@ struct aws_host_resolver *aws_host_resolver_new_default(
AWS_LS_IO_DNS,
"id=%p: Initializing default host resolver with %llu max host entries.",
(void *)resolver,
- (unsigned long long)max_entries);
+ (unsigned long long)options->max_entries);
resolver->vtable = &s_vtable;
resolver->allocator = allocator;
@@ -1417,12 +1591,10 @@ struct aws_host_resolver *aws_host_resolver_new_default(
default_host_resolver->state = DRS_ACTIVE;
aws_mutex_init(&default_host_resolver->resolver_lock);
- aws_global_thread_creator_increment();
-
if (aws_hash_table_init(
&default_host_resolver->host_entry_table,
allocator,
- max_entries,
+ options->max_entries,
aws_hash_string,
aws_hash_callback_string_eq,
NULL,
@@ -1433,7 +1605,7 @@ struct aws_host_resolver *aws_host_resolver_new_default(
if (aws_hash_table_init(
&default_host_resolver->listener_entry_table,
allocator,
- max_entries,
+ options->max_entries,
aws_hash_string,
aws_hash_callback_string_eq,
aws_hash_callback_string_destroy,
@@ -1443,8 +1615,14 @@ struct aws_host_resolver *aws_host_resolver_new_default(
aws_ref_count_init(&resolver->ref_count, resolver, (aws_simple_completion_callback *)s_aws_host_resolver_destroy);
- if (shutdown_options != NULL) {
- resolver->shutdown_options = *shutdown_options;
+ if (options->shutdown_options != NULL) {
+ resolver->shutdown_options = *options->shutdown_options;
+ }
+
+ if (options->system_clock_override_fn != NULL) {
+ default_host_resolver->system_clock_fn = options->system_clock_override_fn;
+ } else {
+ default_host_resolver->system_clock_fn = aws_sys_clock_get_ticks;
}
return resolver;
@@ -1491,6 +1669,7 @@ static struct aws_host_listener *default_add_host_listener(
const struct aws_host_listener_options *options) {
AWS_PRECONDITION(resolver);
+ bool success = false;
if (options == NULL) {
AWS_LOGF_ERROR(AWS_LS_IO_DNS, "Cannot create host resolver listener; options structure is NULL.");
aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
@@ -1513,25 +1692,38 @@ static struct aws_host_listener *default_add_host_listener(
(void *)listener,
(const char *)options->host_name.ptr);
- aws_host_resolver_acquire(resolver);
- listener->resolver = resolver;
+ struct default_host_resolver *default_host_resolver = resolver->impl;
+
+ listener->resolver = aws_host_resolver_acquire(resolver);
listener->host_name = aws_string_new_from_cursor(resolver->allocator, &options->host_name);
+ if (listener->host_name == NULL) {
+ goto done;
+ }
+
listener->resolved_address_callback = options->resolved_address_callback;
- listener->shutdown_callback = options->shutdown_callback;
+ listener->expired_address_callback = options->expired_address_callback;
listener->user_data = options->user_data;
-
- struct default_host_resolver *default_host_resolver = resolver->impl;
+ listener->threaded_data.pin_host_entry = options->pin_host_entry;
/* Add the listener to a host listener entry in the host listener entry table. */
aws_mutex_lock(&default_host_resolver->resolver_lock);
if (s_add_host_listener_to_listener_entry(default_host_resolver, listener->host_name, listener)) {
- aws_mem_release(resolver->allocator, listener);
- listener = NULL;
+ goto done;
}
+ success = true;
+ listener->shutdown_callback = options->shutdown_callback;
+
+done:
+
aws_mutex_unlock(&default_host_resolver->resolver_lock);
+ if (!success) {
+ s_host_listener_destroy(listener);
+ listener = NULL;
+ }
+
return (struct aws_host_listener *)listener;
}
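
aws_host_resolver_new_default() now takes an options struct instead of positional arguments. A minimal migration sketch using only the fields visible in this patch; the designated-initializer style and the in-scope allocator/el_group are assumptions:

    struct aws_host_resolver_default_options resolver_options = {
        .max_entries = 8,
        .el_group = el_group,             /* required: asserted non-NULL above */
        .shutdown_options = NULL,         /* optional */
        .system_clock_override_fn = NULL, /* optional: defaults to aws_sys_clock_get_ticks */
    };

    struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options);
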
diff --git a/contrib/restricted/aws/aws-c-io/source/io.c b/contrib/restricted/aws/aws-c-io/source/io.c
index dc0092a76a..55b9d0eb13 100644
--- a/contrib/restricted/aws/aws-c-io/source/io.c
+++ b/contrib/restricted/aws/aws-c-io/source/io.c
@@ -10,6 +10,10 @@
#define AWS_DEFINE_ERROR_INFO_IO(CODE, STR) [(CODE)-0x0400] = AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-io")
+#define AWS_DEFINE_ERROR_PKCS11_CKR(CKR) \
+ AWS_DEFINE_ERROR_INFO_IO( \
+ AWS_ERROR_PKCS11_##CKR, "A PKCS#11 (Cryptoki) library function failed with return value " #CKR)
+
/* clang-format off */
static struct aws_error_info s_errors[] = {
AWS_DEFINE_ERROR_INFO_IO(
@@ -136,7 +140,7 @@ static struct aws_error_info s_errors[] = {
AWS_IO_STREAM_READ_FAILED,
"Stream failed to read from the underlying io source"),
AWS_DEFINE_ERROR_INFO_IO(
- AWS_IO_INVALID_FILE_HANDLE,
+ DEPRECATED_AWS_IO_INVALID_FILE_HANDLE,
"Operation failed because the file handle was invalid"),
AWS_DEFINE_ERROR_INFO_IO(
AWS_IO_SHARED_LIBRARY_LOAD_FAILURE,
@@ -156,6 +160,128 @@ static struct aws_error_info s_errors[] = {
AWS_DEFINE_ERROR_INFO_IO(
AWS_IO_RETRY_PERMISSION_DENIED,
"Retry cannot be attempted because the retry strategy has prevented the operation."),
+ AWS_DEFINE_ERROR_INFO_IO(
+ AWS_IO_TLS_DIGEST_ALGORITHM_UNSUPPORTED,
+ "TLS digest was created with an unsupported algorithm"),
+ AWS_DEFINE_ERROR_INFO_IO(
+ AWS_IO_TLS_SIGNATURE_ALGORITHM_UNSUPPORTED,
+ "TLS signature algorithm is currently unsupported."),
+ AWS_DEFINE_ERROR_INFO_IO(
+ AWS_ERROR_PKCS11_VERSION_UNSUPPORTED,
+ "The PKCS#11 library uses an unsupported API version."),
+ AWS_DEFINE_ERROR_INFO_IO(
+ AWS_ERROR_PKCS11_TOKEN_NOT_FOUND,
+ "Could not pick PKCS#11 token matching search criteria (none found, or multiple found)"),
+ AWS_DEFINE_ERROR_INFO_IO(
+ AWS_ERROR_PKCS11_KEY_NOT_FOUND,
+ "Could not pick PKCS#11 key matching search criteria (none found, or multiple found)"),
+ AWS_DEFINE_ERROR_INFO_IO(
+ AWS_ERROR_PKCS11_KEY_TYPE_UNSUPPORTED,
+ "PKCS#11 key type not supported"),
+ AWS_DEFINE_ERROR_INFO_IO(
+ AWS_ERROR_PKCS11_UNKNOWN_CRYPTOKI_RETURN_VALUE,
+ "A PKCS#11 (Cryptoki) library function failed with an unknown return value (CKR_). See log for more details."),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_CANCEL),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_HOST_MEMORY),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SLOT_ID_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_GENERAL_ERROR),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_FUNCTION_FAILED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ARGUMENTS_BAD),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_NO_EVENT),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_NEED_TO_CREATE_THREADS),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_CANT_LOCK),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ATTRIBUTE_READ_ONLY),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ATTRIBUTE_SENSITIVE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ATTRIBUTE_TYPE_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ATTRIBUTE_VALUE_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ACTION_PROHIBITED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_DATA_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_DATA_LEN_RANGE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_DEVICE_ERROR),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_DEVICE_MEMORY),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_DEVICE_REMOVED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ENCRYPTED_DATA_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ENCRYPTED_DATA_LEN_RANGE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_FUNCTION_CANCELED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_FUNCTION_NOT_PARALLEL),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_FUNCTION_NOT_SUPPORTED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_HANDLE_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_SIZE_RANGE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_TYPE_INCONSISTENT),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_NOT_NEEDED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_CHANGED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_NEEDED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_INDIGESTIBLE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_FUNCTION_NOT_PERMITTED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_NOT_WRAPPABLE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_UNEXTRACTABLE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_MECHANISM_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_MECHANISM_PARAM_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_OBJECT_HANDLE_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_OPERATION_ACTIVE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_OPERATION_NOT_INITIALIZED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PIN_INCORRECT),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PIN_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PIN_LEN_RANGE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PIN_EXPIRED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PIN_LOCKED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_CLOSED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_COUNT),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_HANDLE_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_PARALLEL_NOT_SUPPORTED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_READ_ONLY),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_EXISTS),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_READ_ONLY_EXISTS),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_READ_WRITE_SO_EXISTS),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SIGNATURE_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SIGNATURE_LEN_RANGE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_TEMPLATE_INCOMPLETE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_TEMPLATE_INCONSISTENT),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_TOKEN_NOT_PRESENT),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_TOKEN_NOT_RECOGNIZED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_TOKEN_WRITE_PROTECTED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_UNWRAPPING_KEY_HANDLE_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_UNWRAPPING_KEY_SIZE_RANGE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_USER_ALREADY_LOGGED_IN),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_USER_NOT_LOGGED_IN),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_USER_PIN_NOT_INITIALIZED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_USER_TYPE_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_USER_ANOTHER_ALREADY_LOGGED_IN),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_USER_TOO_MANY_TYPES),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_WRAPPED_KEY_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_WRAPPED_KEY_LEN_RANGE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_WRAPPING_KEY_HANDLE_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_WRAPPING_KEY_SIZE_RANGE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_WRAPPING_KEY_TYPE_INCONSISTENT),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_RANDOM_SEED_NOT_SUPPORTED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_RANDOM_NO_RNG),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_DOMAIN_PARAMS_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_CURVE_NOT_SUPPORTED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_BUFFER_TOO_SMALL),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SAVED_STATE_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_INFORMATION_SENSITIVE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_STATE_UNSAVEABLE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_CRYPTOKI_NOT_INITIALIZED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_CRYPTOKI_ALREADY_INITIALIZED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_MUTEX_BAD),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_MUTEX_NOT_LOCKED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_NEW_PIN_MODE),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_NEXT_OTP),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_EXCEEDED_MAX_ITERATIONS),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_FIPS_SELF_TEST_FAILED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_LIBRARY_LOAD_FAILED),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PIN_TOO_WEAK),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PUBLIC_KEY_INVALID),
+ AWS_DEFINE_ERROR_PKCS11_CKR(CKR_FUNCTION_REJECTED),
+
+ AWS_DEFINE_ERROR_INFO_IO(
+ AWS_ERROR_IO_PINNED_EVENT_LOOP_MISMATCH,
+ "A connection was requested on an event loop that is not associated with the client bootstrap's event loop group."),
+
+ AWS_DEFINE_ERROR_INFO_IO(
+ AWS_ERROR_PKCS11_ENCODING_ERROR,
+ "A PKCS#11 (Cryptoki) library function was unable to ASN.1 (DER) encode a data structure. See log for more details."),
};
/* clang-format on */
@@ -186,7 +312,13 @@ static struct aws_log_subject_info s_io_log_subject_infos[] = {
DEFINE_LOG_SUBJECT_INFO(
AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY,
"exp-backoff-strategy",
- "Subject for exponential backoff retry strategy")};
+ "Subject for exponential backoff retry strategy"),
+ DEFINE_LOG_SUBJECT_INFO(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "standard-retry-strategy",
+ "Subject for standard retry strategy"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_PKCS11, "pkcs11", "Subject for PKCS#11 library operations"),
+};
static struct aws_log_subject_info_list s_io_log_subject_list = {
.subject_list = s_io_log_subject_infos,
@@ -212,6 +344,7 @@ void aws_io_library_init(struct aws_allocator *allocator) {
void aws_io_library_clean_up(void) {
if (s_io_library_initialized) {
s_io_library_initialized = false;
+ aws_thread_join_all_managed();
aws_tls_clean_up_static_state();
aws_unregister_error_info(&s_list);
aws_unregister_log_subject_info_list(&s_io_log_subject_list);
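
The AWS_DEFINE_ERROR_PKCS11_CKR macro above stamps out one aws-c-io error entry per Cryptoki return code by token-pasting the CKR_ name into both the error enum value and the message string. Expanding a single instance by hand shows what every table row becomes:

    /* AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PIN_INCORRECT) expands to: */
    AWS_DEFINE_ERROR_INFO_IO(
        AWS_ERROR_PKCS11_CKR_PIN_INCORRECT,
        "A PKCS#11 (Cryptoki) library function failed with return value CKR_PIN_INCORRECT")
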
diff --git a/contrib/restricted/aws/aws-c-io/source/linux/epoll_event_loop.c b/contrib/restricted/aws/aws-c-io/source/linux/epoll_event_loop.c
index 8957e6c2b6..c7ad9251a3 100644
--- a/contrib/restricted/aws/aws-c-io/source/linux/epoll_event_loop.c
+++ b/contrib/restricted/aws/aws-c-io/source/linux/epoll_event_loop.c
@@ -79,6 +79,7 @@ static struct aws_event_loop_vtable s_vtable = {
struct epoll_loop {
struct aws_task_scheduler scheduler;
struct aws_thread thread_created_on;
+ struct aws_thread_options thread_options;
aws_thread_id_t thread_joined_to;
struct aws_atomic_var running_thread_id;
struct aws_io_handle read_task_handle;
@@ -110,14 +111,19 @@ enum {
int aws_open_nonblocking_posix_pipe(int pipe_fds[2]);
/* Setup edge triggered epoll with a scheduler. */
-struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, aws_io_clock_fn *clock) {
+struct aws_event_loop *aws_event_loop_new_default_with_options(
+ struct aws_allocator *alloc,
+ const struct aws_event_loop_options *options) {
+ AWS_PRECONDITION(options);
+ AWS_PRECONDITION(options->clock);
+
struct aws_event_loop *loop = aws_mem_calloc(alloc, 1, sizeof(struct aws_event_loop));
if (!loop) {
return NULL;
}
AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Initializing edge-triggered epoll", (void *)loop);
- if (aws_event_loop_init_base(loop, alloc, clock)) {
+ if (aws_event_loop_init_base(loop, alloc, options->clock)) {
goto clean_up_loop;
}
@@ -126,6 +132,12 @@ struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, a
goto cleanup_base_loop;
}
+ if (options->thread_options) {
+ epoll_loop->thread_options = *options->thread_options;
+ } else {
+ epoll_loop->thread_options = *aws_default_thread_options();
+ }
+
/* initialize thread id to NULL, it should be updated when the event loop thread starts. */
aws_atomic_init_ptr(&epoll_loop->running_thread_id, NULL);
@@ -259,7 +271,9 @@ static int s_run(struct aws_event_loop *event_loop) {
AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Starting event-loop thread.", (void *)event_loop);
epoll_loop->should_continue = true;
- if (aws_thread_launch(&epoll_loop->thread_created_on, &s_main_loop, event_loop, NULL)) {
+ aws_thread_increment_unjoined_count();
+ if (aws_thread_launch(&epoll_loop->thread_created_on, &s_main_loop, event_loop, &epoll_loop->thread_options)) {
+ aws_thread_decrement_unjoined_count();
AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: thread creation failed.", (void *)event_loop);
epoll_loop->should_continue = false;
return AWS_OP_ERR;
@@ -303,7 +317,9 @@ static int s_stop(struct aws_event_loop *event_loop) {
static int s_wait_for_stop_completion(struct aws_event_loop *event_loop) {
struct epoll_loop *epoll_loop = event_loop->impl_data;
- return aws_thread_join(&epoll_loop->thread_created_on);
+ int result = aws_thread_join(&epoll_loop->thread_created_on);
+ aws_thread_decrement_unjoined_count();
+ return result;
}
static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) {
@@ -568,8 +584,10 @@ static void s_main_loop(void *args) {
* process queued subscription cleanups.
*/
while (epoll_loop->should_continue) {
+
AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: waiting for a maximum of %d ms", (void *)event_loop, timeout);
int event_count = epoll_wait(epoll_loop->epoll_fd, events, MAX_EVENTS, timeout);
+ aws_event_loop_register_tick_start(event_loop);
AWS_LOGF_TRACE(
AWS_LS_IO_EVENT_LOOP, "id=%p: wake up with %d events to process.", (void *)event_loop, event_count);
@@ -646,6 +664,8 @@ static void s_main_loop(void *args) {
(unsigned long long)timeout_ns,
timeout);
}
+
+ aws_event_loop_register_tick_end(event_loop);
}
AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "id=%p: exiting main loop", (void *)event_loop);
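
Together with the aws_thread_join_all_managed() call added to aws_io_library_clean_up(), the increment/decrement pair above lets library shutdown account for the event-loop thread, while the host resolver switches its thread to AWS_TJS_MANAGED outright. A minimal sketch of launching a managed background thread with the same options pattern as create_and_init_host_entry; s_worker_fn stands in for any caller-supplied thread entry point:

    #include <aws/common/thread.h>

    static int s_launch_managed_worker(struct aws_thread *thread, void (*s_worker_fn)(void *), void *user_data) {
        struct aws_thread_options options = *aws_default_thread_options();
        options.join_strategy = AWS_TJS_MANAGED; /* joined later by aws_thread_join_all_managed() */
        return aws_thread_launch(thread, s_worker_fn, user_data, &options);
    }
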
diff --git a/contrib/restricted/aws/aws-c-io/source/pem_utils.c b/contrib/restricted/aws/aws-c-io/source/pem_utils.c
new file mode 100644
index 0000000000..c3843ffd4a
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-io/source/pem_utils.c
@@ -0,0 +1,98 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/string.h>
+#include <aws/io/private/pem_utils.h>
+
+enum aws_pem_util_state {
+ BEGIN,
+ ON_DATA,
+ END,
+};
+
+static const struct aws_byte_cursor begin_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----BEGIN");
+static const struct aws_byte_cursor end_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----END");
+static const struct aws_byte_cursor dashes = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----");
+
+int aws_sanitize_pem(struct aws_byte_buf *pem, struct aws_allocator *allocator) {
+ if (!pem->len) {
+ /* reject files with no PEM data */
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ struct aws_byte_buf clean_pem_buf;
+ if (aws_byte_buf_init(&clean_pem_buf, allocator, pem->len)) {
+ return AWS_OP_ERR;
+ }
+ struct aws_byte_cursor pem_cursor = aws_byte_cursor_from_buf(pem);
+ int state = BEGIN;
+
+ for (size_t i = 0; i < pem_cursor.len; i++) {
+ /* parse through the pem once */
+ char current = *(pem_cursor.ptr + i);
+ switch (state) {
+ case BEGIN:
+ if (current == '-') {
+ struct aws_byte_cursor compare_cursor = pem_cursor;
+ compare_cursor.len = begin_header.len;
+ compare_cursor.ptr += i;
+ if (aws_byte_cursor_eq(&compare_cursor, &begin_header)) {
+ state = ON_DATA;
+ i--;
+ }
+ }
+ break;
+ case ON_DATA:
+ /* start copying everything */
+ if (current == '-') {
+ struct aws_byte_cursor compare_cursor = pem_cursor;
+ compare_cursor.len = end_header.len;
+ compare_cursor.ptr += i;
+ if (aws_byte_cursor_eq(&compare_cursor, &end_header)) {
+ /* Copy the end header string and start to search for the end part of a pem */
+ state = END;
+ aws_byte_buf_append(&clean_pem_buf, &end_header);
+ i += (end_header.len - 1);
+ break;
+ }
+ }
+ aws_byte_buf_append_byte_dynamic(&clean_pem_buf, (uint8_t)current);
+ break;
+ case END:
+ if (current == '-') {
+ struct aws_byte_cursor compare_cursor = pem_cursor;
+ compare_cursor.len = dashes.len;
+ compare_cursor.ptr += i;
+ if (aws_byte_cursor_eq(&compare_cursor, &dashes)) {
+ /* End part of a pem, copy the last 5 dashes and a new line, then ignore everything before next
+ * begin header */
+ state = BEGIN;
+ aws_byte_buf_append(&clean_pem_buf, &dashes);
+ i += (dashes.len - 1);
+ aws_byte_buf_append_byte_dynamic(&clean_pem_buf, (uint8_t)'\n');
+ break;
+ }
+ }
+ aws_byte_buf_append_byte_dynamic(&clean_pem_buf, (uint8_t)current);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (clean_pem_buf.len == 0) {
+ /* No valid data remains after sanitization; the file was likely not in PEM format */
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto error;
+ }
+
+ struct aws_byte_cursor clean_pem_cursor = aws_byte_cursor_from_buf(&clean_pem_buf);
+ aws_byte_buf_reset(pem, true);
+ aws_byte_buf_append_dynamic(pem, &clean_pem_cursor);
+ aws_byte_buf_clean_up(&clean_pem_buf);
+ return AWS_OP_SUCCESS;
+
+error:
+ aws_byte_buf_clean_up(&clean_pem_buf);
+ return AWS_OP_ERR;
+}
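For reviewers new to this file: aws_sanitize_pem() rewrites the buffer in place, keeping only the -----BEGIN ... -----END blocks and dropping any surrounding noise. A minimal usage sketch, assuming aws-c-common is initialized; the certificate body is a placeholder and error checks are compressed:

    #include <aws/common/byte_buf.h>
    #include <aws/io/private/pem_utils.h>

    struct aws_allocator *alloc = aws_default_allocator();
    struct aws_byte_cursor raw = aws_byte_cursor_from_c_str(
        "leading junk\n"
        "-----BEGIN CERTIFICATE-----\n"
        "MIIB...\n"
        "-----END CERTIFICATE-----\n"
        "trailing junk\n");
    struct aws_byte_buf pem;
    aws_byte_buf_init_copy_from_cursor(&pem, alloc, raw);
    if (aws_sanitize_pem(&pem, alloc) == AWS_OP_SUCCESS) {
        /* pem now holds only the BEGIN/END block, newline-terminated */
    }
    aws_byte_buf_clean_up(&pem);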
diff --git a/contrib/restricted/aws/aws-c-io/source/pkcs11.c b/contrib/restricted/aws/aws-c-io/source/pkcs11.c
new file mode 100644
index 0000000000..943f153fd5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-io/source/pkcs11.c
@@ -0,0 +1,1371 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/io/pkcs11.h>
+
+#include "pkcs11_private.h"
+
+#include <aws/common/mutex.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/string.h>
+#include <aws/io/logging.h>
+#include <aws/io/shared_library.h>
+
+#include <inttypes.h>
+
+/* NOTE 1: even though we currently include the v2.40 headers, they're compatible with any v2.x library.
+ * NOTE 2: v3.x is backwards compatible with 2.x, and even claims to be 2.40 if you check its version the 2.x way */
+#define AWS_SUPPORTED_CRYPTOKI_VERSION_MAJOR 2
+#define AWS_MIN_SUPPORTED_CRYPTOKI_VERSION_MINOR 20
+
+/* clang-format off */
+/*
+ * DER encoded DigestInfo value to be prefixed to the hash, used for RSA signing
+ * See https://tools.ietf.org/html/rfc3447#page-43
+ * (Notes to help understand what's going on here with DER encoding)
+ * 0x30 nn - Sequence of tags, nn bytes, including hash, nn = mm+jj+4 (PKCS11 DigestInfo)
+ * 0x30 mm - Subsequence of tags, mm bytes, mm = ii + 4 (PKCS11 AlgorithmIdentifier)
+ * 0x06 ii - OID encoding, ii bytes, see X.680 - this identifies the hash algorithm
+ * 0x05 00 - NULL
+ * 0x04 jj - OCTET, nn = mm + jj + 4
+ * Digest (nn - mm - 4 bytes)
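+ * Example (SHA-256 prefix below): ii = 0x09, mm = 0x0d = 13 = 9 + 4, jj = 0x20 = 32, nn = 0x31 = 49 = 13 + 32 + 4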
+ */
+static const uint8_t SHA1_PREFIX_TO_RSA_SIG[] = { 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14 };
+static const uint8_t SHA256_PREFIX_TO_RSA_SIG[] = { 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20 };
+static const uint8_t SHA384_PREFIX_TO_RSA_SIG[] = { 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30 };
+static const uint8_t SHA512_PREFIX_TO_RSA_SIG[] = { 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40 };
+static const uint8_t SHA224_PREFIX_TO_RSA_SIG[] = { 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c };
+/* clang-format on */
+
+const char *aws_tls_hash_algorithm_str(enum aws_tls_hash_algorithm hash) {
+ /* clang-format off */
+ switch (hash) {
+ case (AWS_TLS_HASH_SHA1): return "SHA1";
+ case (AWS_TLS_HASH_SHA224): return "SHA224";
+ case (AWS_TLS_HASH_SHA256): return "SHA256";
+ case (AWS_TLS_HASH_SHA384): return "SHA384";
+ case (AWS_TLS_HASH_SHA512): return "SHA512";
+ default: return "<UNKNOWN HASH ALGORITHM>";
+ }
+ /* clang-format on */
+}
+
+const char *aws_tls_signature_algorithm_str(enum aws_tls_signature_algorithm signature) {
+ /* clang-format off */
+ switch (signature) {
+ case (AWS_TLS_SIGNATURE_RSA): return "RSA";
+ case (AWS_TLS_SIGNATURE_ECDSA): return "ECDSA";
+ default: return "<UNKNOWN SIGNATURE ALGORITHM>";
+ }
+ /* clang-format on */
+}
+
+/* Return c-string for PKCS#11 CKR_* constants. */
+const char *aws_pkcs11_ckr_str(CK_RV rv) {
+ /* clang-format off */
+ switch (rv) {
+ case (CKR_OK): return "CKR_OK";
+ case (CKR_CANCEL): return "CKR_CANCEL";
+ case (CKR_HOST_MEMORY): return "CKR_HOST_MEMORY";
+ case (CKR_SLOT_ID_INVALID): return "CKR_SLOT_ID_INVALID";
+ case (CKR_GENERAL_ERROR): return "CKR_GENERAL_ERROR";
+ case (CKR_FUNCTION_FAILED): return "CKR_FUNCTION_FAILED";
+ case (CKR_ARGUMENTS_BAD): return "CKR_ARGUMENTS_BAD";
+ case (CKR_NO_EVENT): return "CKR_NO_EVENT";
+ case (CKR_NEED_TO_CREATE_THREADS): return "CKR_NEED_TO_CREATE_THREADS";
+ case (CKR_CANT_LOCK): return "CKR_CANT_LOCK";
+ case (CKR_ATTRIBUTE_READ_ONLY): return "CKR_ATTRIBUTE_READ_ONLY";
+ case (CKR_ATTRIBUTE_SENSITIVE): return "CKR_ATTRIBUTE_SENSITIVE";
+ case (CKR_ATTRIBUTE_TYPE_INVALID): return "CKR_ATTRIBUTE_TYPE_INVALID";
+ case (CKR_ATTRIBUTE_VALUE_INVALID): return "CKR_ATTRIBUTE_VALUE_INVALID";
+ case (CKR_ACTION_PROHIBITED): return "CKR_ACTION_PROHIBITED";
+ case (CKR_DATA_INVALID): return "CKR_DATA_INVALID";
+ case (CKR_DATA_LEN_RANGE): return "CKR_DATA_LEN_RANGE";
+ case (CKR_DEVICE_ERROR): return "CKR_DEVICE_ERROR";
+ case (CKR_DEVICE_MEMORY): return "CKR_DEVICE_MEMORY";
+ case (CKR_DEVICE_REMOVED): return "CKR_DEVICE_REMOVED";
+ case (CKR_ENCRYPTED_DATA_INVALID): return "CKR_ENCRYPTED_DATA_INVALID";
+ case (CKR_ENCRYPTED_DATA_LEN_RANGE): return "CKR_ENCRYPTED_DATA_LEN_RANGE";
+ case (CKR_FUNCTION_CANCELED): return "CKR_FUNCTION_CANCELED";
+ case (CKR_FUNCTION_NOT_PARALLEL): return "CKR_FUNCTION_NOT_PARALLEL";
+ case (CKR_FUNCTION_NOT_SUPPORTED): return "CKR_FUNCTION_NOT_SUPPORTED";
+ case (CKR_KEY_HANDLE_INVALID): return "CKR_KEY_HANDLE_INVALID";
+ case (CKR_KEY_SIZE_RANGE): return "CKR_KEY_SIZE_RANGE";
+ case (CKR_KEY_TYPE_INCONSISTENT): return "CKR_KEY_TYPE_INCONSISTENT";
+ case (CKR_KEY_NOT_NEEDED): return "CKR_KEY_NOT_NEEDED";
+ case (CKR_KEY_CHANGED): return "CKR_KEY_CHANGED";
+ case (CKR_KEY_NEEDED): return "CKR_KEY_NEEDED";
+ case (CKR_KEY_INDIGESTIBLE): return "CKR_KEY_INDIGESTIBLE";
+ case (CKR_KEY_FUNCTION_NOT_PERMITTED): return "CKR_KEY_FUNCTION_NOT_PERMITTED";
+ case (CKR_KEY_NOT_WRAPPABLE): return "CKR_KEY_NOT_WRAPPABLE";
+ case (CKR_KEY_UNEXTRACTABLE): return "CKR_KEY_UNEXTRACTABLE";
+ case (CKR_MECHANISM_INVALID): return "CKR_MECHANISM_INVALID";
+ case (CKR_MECHANISM_PARAM_INVALID): return "CKR_MECHANISM_PARAM_INVALID";
+ case (CKR_OBJECT_HANDLE_INVALID): return "CKR_OBJECT_HANDLE_INVALID";
+ case (CKR_OPERATION_ACTIVE): return "CKR_OPERATION_ACTIVE";
+ case (CKR_OPERATION_NOT_INITIALIZED): return "CKR_OPERATION_NOT_INITIALIZED";
+ case (CKR_PIN_INCORRECT): return "CKR_PIN_INCORRECT";
+ case (CKR_PIN_INVALID): return "CKR_PIN_INVALID";
+ case (CKR_PIN_LEN_RANGE): return "CKR_PIN_LEN_RANGE";
+ case (CKR_PIN_EXPIRED): return "CKR_PIN_EXPIRED";
+ case (CKR_PIN_LOCKED): return "CKR_PIN_LOCKED";
+ case (CKR_SESSION_CLOSED): return "CKR_SESSION_CLOSED";
+ case (CKR_SESSION_COUNT): return "CKR_SESSION_COUNT";
+ case (CKR_SESSION_HANDLE_INVALID): return "CKR_SESSION_HANDLE_INVALID";
+ case (CKR_SESSION_PARALLEL_NOT_SUPPORTED): return "CKR_SESSION_PARALLEL_NOT_SUPPORTED";
+ case (CKR_SESSION_READ_ONLY): return "CKR_SESSION_READ_ONLY";
+ case (CKR_SESSION_EXISTS): return "CKR_SESSION_EXISTS";
+ case (CKR_SESSION_READ_ONLY_EXISTS): return "CKR_SESSION_READ_ONLY_EXISTS";
+ case (CKR_SESSION_READ_WRITE_SO_EXISTS): return "CKR_SESSION_READ_WRITE_SO_EXISTS";
+ case (CKR_SIGNATURE_INVALID): return "CKR_SIGNATURE_INVALID";
+ case (CKR_SIGNATURE_LEN_RANGE): return "CKR_SIGNATURE_LEN_RANGE";
+ case (CKR_TEMPLATE_INCOMPLETE): return "CKR_TEMPLATE_INCOMPLETE";
+ case (CKR_TEMPLATE_INCONSISTENT): return "CKR_TEMPLATE_INCONSISTENT";
+ case (CKR_TOKEN_NOT_PRESENT): return "CKR_TOKEN_NOT_PRESENT";
+ case (CKR_TOKEN_NOT_RECOGNIZED): return "CKR_TOKEN_NOT_RECOGNIZED";
+ case (CKR_TOKEN_WRITE_PROTECTED): return "CKR_TOKEN_WRITE_PROTECTED";
+ case (CKR_UNWRAPPING_KEY_HANDLE_INVALID): return "CKR_UNWRAPPING_KEY_HANDLE_INVALID";
+ case (CKR_UNWRAPPING_KEY_SIZE_RANGE): return "CKR_UNWRAPPING_KEY_SIZE_RANGE";
+ case (CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT): return "CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT";
+ case (CKR_USER_ALREADY_LOGGED_IN): return "CKR_USER_ALREADY_LOGGED_IN";
+ case (CKR_USER_NOT_LOGGED_IN): return "CKR_USER_NOT_LOGGED_IN";
+ case (CKR_USER_PIN_NOT_INITIALIZED): return "CKR_USER_PIN_NOT_INITIALIZED";
+ case (CKR_USER_TYPE_INVALID): return "CKR_USER_TYPE_INVALID";
+ case (CKR_USER_ANOTHER_ALREADY_LOGGED_IN): return "CKR_USER_ANOTHER_ALREADY_LOGGED_IN";
+ case (CKR_USER_TOO_MANY_TYPES): return "CKR_USER_TOO_MANY_TYPES";
+ case (CKR_WRAPPED_KEY_INVALID): return "CKR_WRAPPED_KEY_INVALID";
+ case (CKR_WRAPPED_KEY_LEN_RANGE): return "CKR_WRAPPED_KEY_LEN_RANGE";
+ case (CKR_WRAPPING_KEY_HANDLE_INVALID): return "CKR_WRAPPING_KEY_HANDLE_INVALID";
+ case (CKR_WRAPPING_KEY_SIZE_RANGE): return "CKR_WRAPPING_KEY_SIZE_RANGE";
+ case (CKR_WRAPPING_KEY_TYPE_INCONSISTENT): return "CKR_WRAPPING_KEY_TYPE_INCONSISTENT";
+ case (CKR_RANDOM_SEED_NOT_SUPPORTED): return "CKR_RANDOM_SEED_NOT_SUPPORTED";
+ case (CKR_RANDOM_NO_RNG): return "CKR_RANDOM_NO_RNG";
+ case (CKR_DOMAIN_PARAMS_INVALID): return "CKR_DOMAIN_PARAMS_INVALID";
+ case (CKR_CURVE_NOT_SUPPORTED): return "CKR_CURVE_NOT_SUPPORTED";
+ case (CKR_BUFFER_TOO_SMALL): return "CKR_BUFFER_TOO_SMALL";
+ case (CKR_SAVED_STATE_INVALID): return "CKR_SAVED_STATE_INVALID";
+ case (CKR_INFORMATION_SENSITIVE): return "CKR_INFORMATION_SENSITIVE";
+ case (CKR_STATE_UNSAVEABLE): return "CKR_STATE_UNSAVEABLE";
+ case (CKR_CRYPTOKI_NOT_INITIALIZED): return "CKR_CRYPTOKI_NOT_INITIALIZED";
+ case (CKR_CRYPTOKI_ALREADY_INITIALIZED): return "CKR_CRYPTOKI_ALREADY_INITIALIZED";
+ case (CKR_MUTEX_BAD): return "CKR_MUTEX_BAD";
+ case (CKR_MUTEX_NOT_LOCKED): return "CKR_MUTEX_NOT_LOCKED";
+ case (CKR_NEW_PIN_MODE): return "CKR_NEW_PIN_MODE";
+ case (CKR_NEXT_OTP): return "CKR_NEXT_OTP";
+ case (CKR_EXCEEDED_MAX_ITERATIONS): return "CKR_EXCEEDED_MAX_ITERATIONS";
+ case (CKR_FIPS_SELF_TEST_FAILED): return "CKR_FIPS_SELF_TEST_FAILED";
+ case (CKR_LIBRARY_LOAD_FAILED): return "CKR_LIBRARY_LOAD_FAILED";
+ case (CKR_PIN_TOO_WEAK): return "CKR_PIN_TOO_WEAK";
+ case (CKR_PUBLIC_KEY_INVALID): return "CKR_PUBLIC_KEY_INVALID";
+ case (CKR_FUNCTION_REJECTED): return "CKR_FUNCTION_REJECTED";
+ default: return "<UNKNOWN RETURN VALUE>";
+ }
+ /* clang-format on */
+}
+
+/* Translate from a CK_RV to an AWS error code */
+static int s_ck_to_aws_error(CK_RV rv) {
+ AWS_ASSERT(rv != CKR_OK);
+ /* clang-format off */
+ switch (rv) {
+ case (CKR_CANCEL): return AWS_ERROR_PKCS11_CKR_CANCEL;
+ case (CKR_HOST_MEMORY): return AWS_ERROR_PKCS11_CKR_HOST_MEMORY;
+ case (CKR_SLOT_ID_INVALID): return AWS_ERROR_PKCS11_CKR_SLOT_ID_INVALID;
+ case (CKR_GENERAL_ERROR): return AWS_ERROR_PKCS11_CKR_GENERAL_ERROR;
+ case (CKR_FUNCTION_FAILED): return AWS_ERROR_PKCS11_CKR_FUNCTION_FAILED;
+ case (CKR_ARGUMENTS_BAD): return AWS_ERROR_PKCS11_CKR_ARGUMENTS_BAD;
+ case (CKR_NO_EVENT): return AWS_ERROR_PKCS11_CKR_NO_EVENT;
+ case (CKR_NEED_TO_CREATE_THREADS): return AWS_ERROR_PKCS11_CKR_NEED_TO_CREATE_THREADS;
+ case (CKR_CANT_LOCK): return AWS_ERROR_PKCS11_CKR_CANT_LOCK;
+ case (CKR_ATTRIBUTE_READ_ONLY): return AWS_ERROR_PKCS11_CKR_ATTRIBUTE_READ_ONLY;
+ case (CKR_ATTRIBUTE_SENSITIVE): return AWS_ERROR_PKCS11_CKR_ATTRIBUTE_SENSITIVE;
+ case (CKR_ATTRIBUTE_TYPE_INVALID): return AWS_ERROR_PKCS11_CKR_ATTRIBUTE_TYPE_INVALID;
+ case (CKR_ATTRIBUTE_VALUE_INVALID): return AWS_ERROR_PKCS11_CKR_ATTRIBUTE_VALUE_INVALID;
+ case (CKR_ACTION_PROHIBITED): return AWS_ERROR_PKCS11_CKR_ACTION_PROHIBITED;
+ case (CKR_DATA_INVALID): return AWS_ERROR_PKCS11_CKR_DATA_INVALID;
+ case (CKR_DATA_LEN_RANGE): return AWS_ERROR_PKCS11_CKR_DATA_LEN_RANGE;
+ case (CKR_DEVICE_ERROR): return AWS_ERROR_PKCS11_CKR_DEVICE_ERROR;
+ case (CKR_DEVICE_MEMORY): return AWS_ERROR_PKCS11_CKR_DEVICE_MEMORY;
+ case (CKR_DEVICE_REMOVED): return AWS_ERROR_PKCS11_CKR_DEVICE_REMOVED;
+ case (CKR_ENCRYPTED_DATA_INVALID): return AWS_ERROR_PKCS11_CKR_ENCRYPTED_DATA_INVALID;
+ case (CKR_ENCRYPTED_DATA_LEN_RANGE): return AWS_ERROR_PKCS11_CKR_ENCRYPTED_DATA_LEN_RANGE;
+ case (CKR_FUNCTION_CANCELED): return AWS_ERROR_PKCS11_CKR_FUNCTION_CANCELED;
+ case (CKR_FUNCTION_NOT_PARALLEL): return AWS_ERROR_PKCS11_CKR_FUNCTION_NOT_PARALLEL;
+ case (CKR_FUNCTION_NOT_SUPPORTED): return AWS_ERROR_PKCS11_CKR_FUNCTION_NOT_SUPPORTED;
+ case (CKR_KEY_HANDLE_INVALID): return AWS_ERROR_PKCS11_CKR_KEY_HANDLE_INVALID;
+ case (CKR_KEY_SIZE_RANGE): return AWS_ERROR_PKCS11_CKR_KEY_SIZE_RANGE;
+ case (CKR_KEY_TYPE_INCONSISTENT): return AWS_ERROR_PKCS11_CKR_KEY_TYPE_INCONSISTENT;
+ case (CKR_KEY_NOT_NEEDED): return AWS_ERROR_PKCS11_CKR_KEY_NOT_NEEDED;
+ case (CKR_KEY_CHANGED): return AWS_ERROR_PKCS11_CKR_KEY_CHANGED;
+ case (CKR_KEY_NEEDED): return AWS_ERROR_PKCS11_CKR_KEY_NEEDED;
+ case (CKR_KEY_INDIGESTIBLE): return AWS_ERROR_PKCS11_CKR_KEY_INDIGESTIBLE;
+ case (CKR_KEY_FUNCTION_NOT_PERMITTED): return AWS_ERROR_PKCS11_CKR_KEY_FUNCTION_NOT_PERMITTED;
+ case (CKR_KEY_NOT_WRAPPABLE): return AWS_ERROR_PKCS11_CKR_KEY_NOT_WRAPPABLE;
+ case (CKR_KEY_UNEXTRACTABLE): return AWS_ERROR_PKCS11_CKR_KEY_UNEXTRACTABLE;
+ case (CKR_MECHANISM_INVALID): return AWS_ERROR_PKCS11_CKR_MECHANISM_INVALID;
+ case (CKR_MECHANISM_PARAM_INVALID): return AWS_ERROR_PKCS11_CKR_MECHANISM_PARAM_INVALID;
+ case (CKR_OBJECT_HANDLE_INVALID): return AWS_ERROR_PKCS11_CKR_OBJECT_HANDLE_INVALID;
+ case (CKR_OPERATION_ACTIVE): return AWS_ERROR_PKCS11_CKR_OPERATION_ACTIVE;
+ case (CKR_OPERATION_NOT_INITIALIZED): return AWS_ERROR_PKCS11_CKR_OPERATION_NOT_INITIALIZED;
+ case (CKR_PIN_INCORRECT): return AWS_ERROR_PKCS11_CKR_PIN_INCORRECT;
+ case (CKR_PIN_INVALID): return AWS_ERROR_PKCS11_CKR_PIN_INVALID;
+ case (CKR_PIN_LEN_RANGE): return AWS_ERROR_PKCS11_CKR_PIN_LEN_RANGE;
+ case (CKR_PIN_EXPIRED): return AWS_ERROR_PKCS11_CKR_PIN_EXPIRED;
+ case (CKR_PIN_LOCKED): return AWS_ERROR_PKCS11_CKR_PIN_LOCKED;
+ case (CKR_SESSION_CLOSED): return AWS_ERROR_PKCS11_CKR_SESSION_CLOSED;
+ case (CKR_SESSION_COUNT): return AWS_ERROR_PKCS11_CKR_SESSION_COUNT;
+ case (CKR_SESSION_HANDLE_INVALID): return AWS_ERROR_PKCS11_CKR_SESSION_HANDLE_INVALID;
+ case (CKR_SESSION_PARALLEL_NOT_SUPPORTED): return AWS_ERROR_PKCS11_CKR_SESSION_PARALLEL_NOT_SUPPORTED;
+ case (CKR_SESSION_READ_ONLY): return AWS_ERROR_PKCS11_CKR_SESSION_READ_ONLY;
+ case (CKR_SESSION_EXISTS): return AWS_ERROR_PKCS11_CKR_SESSION_EXISTS;
+ case (CKR_SESSION_READ_ONLY_EXISTS): return AWS_ERROR_PKCS11_CKR_SESSION_READ_ONLY_EXISTS;
+ case (CKR_SESSION_READ_WRITE_SO_EXISTS): return AWS_ERROR_PKCS11_CKR_SESSION_READ_WRITE_SO_EXISTS;
+ case (CKR_SIGNATURE_INVALID): return AWS_ERROR_PKCS11_CKR_SIGNATURE_INVALID;
+ case (CKR_SIGNATURE_LEN_RANGE): return AWS_ERROR_PKCS11_CKR_SIGNATURE_LEN_RANGE;
+ case (CKR_TEMPLATE_INCOMPLETE): return AWS_ERROR_PKCS11_CKR_TEMPLATE_INCOMPLETE;
+ case (CKR_TEMPLATE_INCONSISTENT): return AWS_ERROR_PKCS11_CKR_TEMPLATE_INCONSISTENT;
+ case (CKR_TOKEN_NOT_PRESENT): return AWS_ERROR_PKCS11_CKR_TOKEN_NOT_PRESENT;
+ case (CKR_TOKEN_NOT_RECOGNIZED): return AWS_ERROR_PKCS11_CKR_TOKEN_NOT_RECOGNIZED;
+ case (CKR_TOKEN_WRITE_PROTECTED): return AWS_ERROR_PKCS11_CKR_TOKEN_WRITE_PROTECTED;
+ case (CKR_UNWRAPPING_KEY_HANDLE_INVALID): return AWS_ERROR_PKCS11_CKR_UNWRAPPING_KEY_HANDLE_INVALID;
+ case (CKR_UNWRAPPING_KEY_SIZE_RANGE): return AWS_ERROR_PKCS11_CKR_UNWRAPPING_KEY_SIZE_RANGE;
+ case (CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT): return AWS_ERROR_PKCS11_CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT;
+ case (CKR_USER_ALREADY_LOGGED_IN): return AWS_ERROR_PKCS11_CKR_USER_ALREADY_LOGGED_IN;
+ case (CKR_USER_NOT_LOGGED_IN): return AWS_ERROR_PKCS11_CKR_USER_NOT_LOGGED_IN;
+ case (CKR_USER_PIN_NOT_INITIALIZED): return AWS_ERROR_PKCS11_CKR_USER_PIN_NOT_INITIALIZED;
+ case (CKR_USER_TYPE_INVALID): return AWS_ERROR_PKCS11_CKR_USER_TYPE_INVALID;
+ case (CKR_USER_ANOTHER_ALREADY_LOGGED_IN): return AWS_ERROR_PKCS11_CKR_USER_ANOTHER_ALREADY_LOGGED_IN;
+ case (CKR_USER_TOO_MANY_TYPES): return AWS_ERROR_PKCS11_CKR_USER_TOO_MANY_TYPES;
+ case (CKR_WRAPPED_KEY_INVALID): return AWS_ERROR_PKCS11_CKR_WRAPPED_KEY_INVALID;
+ case (CKR_WRAPPED_KEY_LEN_RANGE): return AWS_ERROR_PKCS11_CKR_WRAPPED_KEY_LEN_RANGE;
+ case (CKR_WRAPPING_KEY_HANDLE_INVALID): return AWS_ERROR_PKCS11_CKR_WRAPPING_KEY_HANDLE_INVALID;
+ case (CKR_WRAPPING_KEY_SIZE_RANGE): return AWS_ERROR_PKCS11_CKR_WRAPPING_KEY_SIZE_RANGE;
+ case (CKR_WRAPPING_KEY_TYPE_INCONSISTENT): return AWS_ERROR_PKCS11_CKR_WRAPPING_KEY_TYPE_INCONSISTENT;
+ case (CKR_RANDOM_SEED_NOT_SUPPORTED): return AWS_ERROR_PKCS11_CKR_RANDOM_SEED_NOT_SUPPORTED;
+ case (CKR_RANDOM_NO_RNG): return AWS_ERROR_PKCS11_CKR_RANDOM_NO_RNG;
+ case (CKR_DOMAIN_PARAMS_INVALID): return AWS_ERROR_PKCS11_CKR_DOMAIN_PARAMS_INVALID;
+ case (CKR_CURVE_NOT_SUPPORTED): return AWS_ERROR_PKCS11_CKR_CURVE_NOT_SUPPORTED;
+ case (CKR_BUFFER_TOO_SMALL): return AWS_ERROR_PKCS11_CKR_BUFFER_TOO_SMALL;
+ case (CKR_SAVED_STATE_INVALID): return AWS_ERROR_PKCS11_CKR_SAVED_STATE_INVALID;
+ case (CKR_INFORMATION_SENSITIVE): return AWS_ERROR_PKCS11_CKR_INFORMATION_SENSITIVE;
+ case (CKR_STATE_UNSAVEABLE): return AWS_ERROR_PKCS11_CKR_STATE_UNSAVEABLE;
+ case (CKR_CRYPTOKI_NOT_INITIALIZED): return AWS_ERROR_PKCS11_CKR_CRYPTOKI_NOT_INITIALIZED;
+ case (CKR_CRYPTOKI_ALREADY_INITIALIZED): return AWS_ERROR_PKCS11_CKR_CRYPTOKI_ALREADY_INITIALIZED;
+ case (CKR_MUTEX_BAD): return AWS_ERROR_PKCS11_CKR_MUTEX_BAD;
+ case (CKR_MUTEX_NOT_LOCKED): return AWS_ERROR_PKCS11_CKR_MUTEX_NOT_LOCKED;
+ case (CKR_NEW_PIN_MODE): return AWS_ERROR_PKCS11_CKR_NEW_PIN_MODE;
+ case (CKR_NEXT_OTP): return AWS_ERROR_PKCS11_CKR_NEXT_OTP;
+ case (CKR_EXCEEDED_MAX_ITERATIONS): return AWS_ERROR_PKCS11_CKR_EXCEEDED_MAX_ITERATIONS;
+ case (CKR_FIPS_SELF_TEST_FAILED): return AWS_ERROR_PKCS11_CKR_FIPS_SELF_TEST_FAILED;
+ case (CKR_LIBRARY_LOAD_FAILED): return AWS_ERROR_PKCS11_CKR_LIBRARY_LOAD_FAILED;
+ case (CKR_PIN_TOO_WEAK): return AWS_ERROR_PKCS11_CKR_PIN_TOO_WEAK;
+ case (CKR_PUBLIC_KEY_INVALID): return AWS_ERROR_PKCS11_CKR_PUBLIC_KEY_INVALID;
+ case (CKR_FUNCTION_REJECTED): return AWS_ERROR_PKCS11_CKR_FUNCTION_REJECTED;
+ default: return AWS_ERROR_PKCS11_UNKNOWN_CRYPTOKI_RETURN_VALUE;
+ }
+ /* clang-format on */
+}
+
+/* Return c-string for PKCS#11 CKK_* constants. */
+static const char *s_ckk_str(CK_KEY_TYPE key_type) {
+ /* clang-format off */
+ switch(key_type) {
+ case (CKK_RSA): return "CKK_RSA";
+ case (CKK_DSA): return "CKK_DSA";
+ case (CKK_DH): return "CKK_DH";
+ case (CKK_EC): return "CKK_EC";
+ case (CKK_X9_42_DH): return "CKK_X9_42_DH";
+ case (CKK_KEA): return "CKK_KEA";
+ case (CKK_GENERIC_SECRET): return "CKK_GENERIC_SECRET";
+ case (CKK_RC2): return "CKK_RC2";
+ case (CKK_RC4): return "CKK_RC4";
+ case (CKK_DES): return "CKK_DES";
+ case (CKK_DES2): return "CKK_DES2";
+ case (CKK_DES3): return "CKK_DES3";
+ case (CKK_CAST): return "CKK_CAST";
+ case (CKK_CAST3): return "CKK_CAST3";
+ case (CKK_CAST128): return "CKK_CAST128";
+ case (CKK_RC5): return "CKK_RC5";
+ case (CKK_IDEA): return "CKK_IDEA";
+ case (CKK_SKIPJACK): return "CKK_SKIPJACK";
+ case (CKK_BATON): return "CKK_BATON";
+ case (CKK_JUNIPER): return "CKK_JUNIPER";
+ case (CKK_CDMF): return "CKK_CDMF";
+ case (CKK_AES): return "CKK_AES";
+ case (CKK_BLOWFISH): return "CKK_BLOWFISH";
+ case (CKK_TWOFISH): return "CKK_TWOFISH";
+ case (CKK_SECURID): return "CKK_SECURID";
+ case (CKK_HOTP): return "CKK_HOTP";
+ case (CKK_ACTI): return "CKK_ACTI";
+ case (CKK_CAMELLIA): return "CKK_CAMELLIA";
+ case (CKK_ARIA): return "CKK_ARIA";
+ case (CKK_MD5_HMAC): return "CKK_MD5_HMAC";
+ case (CKK_SHA_1_HMAC): return "CKK_SHA_1_HMAC";
+ case (CKK_RIPEMD128_HMAC): return "CKK_RIPEMD128_HMAC";
+ case (CKK_RIPEMD160_HMAC): return "CKK_RIPEMD160_HMAC";
+ case (CKK_SHA256_HMAC): return "CKK_SHA256_HMAC";
+ case (CKK_SHA384_HMAC): return "CKK_SHA384_HMAC";
+ case (CKK_SHA512_HMAC): return "CKK_SHA512_HMAC";
+ case (CKK_SHA224_HMAC): return "CKK_SHA224_HMAC";
+ case (CKK_SEED): return "CKK_SEED";
+ case (CKK_GOSTR3410): return "CKK_GOSTR3410";
+ case (CKK_GOSTR3411): return "CKK_GOSTR3411";
+ case (CKK_GOST28147): return "CKK_GOST28147";
+ default: return "<UNKNOWN KEY TYPE>";
+ }
+ /* clang-format on */
+}
+
+/* Log the failure of a PKCS#11 function, and call aws_raise_error() with the appropriate AWS error code */
+static int s_raise_ck_error(const struct aws_pkcs11_lib *pkcs11_lib, const char *fn_name, CK_RV rv) {
+ int aws_err = s_ck_to_aws_error(rv);
+
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "id=%p: %s() failed. PKCS#11 error: %s (0x%08lX). AWS error: %s",
+ (void *)pkcs11_lib,
+ fn_name,
+ aws_pkcs11_ckr_str(rv),
+ rv,
+ aws_error_name(aws_err));
+
+ return aws_raise_error(aws_err);
+}
+
+/* Log the failure of a PKCS#11 session-handle function and call aws_raise_error() with the appropriate error code */
+static int s_raise_ck_session_error(
+ const struct aws_pkcs11_lib *pkcs11_lib,
+ const char *fn_name,
+ CK_SESSION_HANDLE session,
+ CK_RV rv) {
+
+ int aws_err = s_ck_to_aws_error(rv);
+
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "id=%p session=%lu: %s() failed. PKCS#11 error: %s (0x%08lX). AWS error: %s",
+ (void *)pkcs11_lib,
+ session,
+ fn_name,
+ aws_pkcs11_ckr_str(rv),
+ rv,
+ aws_error_name(aws_err));
+
+ return aws_raise_error(aws_err);
+}
+
+/* PKCS#11 often pads strings with ' ' */
+static bool s_is_padding(uint8_t c) {
+ return c == ' ';
+}
+
+/* Return byte-cursor to string with ' ' padding trimmed off.
+ * PKCS#11 structs commonly store strings in fixed-width arrays, padded with ' ' instead of a null terminator */
+static struct aws_byte_cursor s_trim_padding(const uint8_t *str, size_t len) {
+ const struct aws_byte_cursor src = aws_byte_cursor_from_array(str, len);
+ return aws_byte_cursor_right_trim_pred(&src, s_is_padding);
+}
+
+/* Callback for PKCS#11 library to create a mutex.
+ * Described in PKCS11-base-v2.40 section 3.7 */
+static CK_RV s_pkcs11_create_mutex(CK_VOID_PTR_PTR mutex_out) {
+ if (mutex_out == NULL) {
+ return CKR_GENERAL_ERROR;
+ }
+
+ /* Using the default allocator because there's no way to know which PKCS#11 instance is invoking this callback */
+ struct aws_allocator *allocator = aws_default_allocator();
+
+ struct aws_mutex *mutex = aws_mem_calloc(allocator, 1, sizeof(struct aws_mutex));
+ if (aws_mutex_init(mutex)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "PKCS#11 CreateMutex() failed, error %s", aws_error_name(aws_last_error()));
+ aws_mem_release(allocator, mutex);
+ *mutex_out = NULL;
+ return CKR_GENERAL_ERROR;
+ }
+
+ *mutex_out = mutex;
+ return CKR_OK;
+}
+
+/* Callback for PKCS#11 library to destroy a mutex.
+ * Described in PKCS11-base-v2.40 section 3.7 */
+static CK_RV s_pkcs11_destroy_mutex(CK_VOID_PTR mutex_ptr) {
+ if (mutex_ptr == NULL) {
+ return CKR_GENERAL_ERROR;
+ }
+
+ struct aws_mutex *mutex = mutex_ptr;
+ aws_mutex_clean_up(mutex);
+ aws_mem_release(aws_default_allocator(), mutex);
+ return CKR_OK;
+}
+
+/* Callback for PKCS#11 library to lock a mutex.
+ * Described in PKCS11-base-v2.40 section 3.7 */
+static CK_RV s_pkcs11_lock_mutex(CK_VOID_PTR mutex_ptr) {
+ if (mutex_ptr == NULL) {
+ return CKR_GENERAL_ERROR;
+ }
+
+ struct aws_mutex *mutex = mutex_ptr;
+ if (aws_mutex_lock(mutex)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "PKCS#11 LockMutex() failed, error %s", aws_error_name(aws_last_error()));
+ return CKR_GENERAL_ERROR;
+ }
+
+ return CKR_OK;
+}
+
+/* Callback for PKCS#11 library to unlock a mutex.
+ * Described in PKCS11-base-v2.40 section 3.7 */
+static CK_RV s_pkcs11_unlock_mutex(CK_VOID_PTR mutex_ptr) {
+ if (mutex_ptr == NULL) {
+ return CKR_GENERAL_ERROR;
+ }
+
+ struct aws_mutex *mutex = mutex_ptr;
+ if (aws_mutex_unlock(mutex)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "PKCS#11 UnlockMutex() failed, error %s", aws_error_name(aws_last_error()));
+
+ /* NOTE: Cryptoki has a CKR_MUTEX_NOT_LOCKED error code.
+ * But POSIX doesn't treat this as an error, and neither does Windows, so ¯\_(ツ)_/¯
+ * If aws_mutex_unlock() failed here, it was something else. */
+ return CKR_GENERAL_ERROR;
+ }
+
+ return CKR_OK;
+}
+
+struct aws_pkcs11_lib {
+ struct aws_ref_count ref_count;
+ struct aws_allocator *allocator;
+
+ struct aws_shared_library shared_lib;
+
+ CK_FUNCTION_LIST_PTR function_list;
+
+ /* If true, C_Finalize() should be called when last ref-count is released */
+ bool finalize_on_cleanup;
+};
+
+/* Invoked when last ref-count is released. Free all resources.
+ * Note that this is also called if initialization fails half-way through */
+static void s_pkcs11_lib_destroy(void *user_data) {
+ struct aws_pkcs11_lib *pkcs11_lib = user_data;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_PKCS11,
+ "id=%p: Unloading PKCS#11. C_Finalize:%s",
+ (void *)pkcs11_lib,
+ pkcs11_lib->finalize_on_cleanup ? "yes" : "omit");
+
+ if (pkcs11_lib->finalize_on_cleanup) {
+ CK_RV rv = pkcs11_lib->function_list->C_Finalize(NULL);
+ if (rv != CKR_OK) {
+ /* Log about it, but continue cleaning up */
+ s_raise_ck_error(pkcs11_lib, "C_Finalize", rv);
+ }
+ }
+
+ aws_shared_library_clean_up(&pkcs11_lib->shared_lib);
+ aws_mem_release(pkcs11_lib->allocator, pkcs11_lib);
+}
+
+struct aws_pkcs11_lib *aws_pkcs11_lib_new(
+ struct aws_allocator *allocator,
+ const struct aws_pkcs11_lib_options *options) {
+
+ /* Validate options */
+ switch (options->initialize_finalize_behavior) {
+ case AWS_PKCS11_LIB_DEFAULT_BEHAVIOR:
+ case AWS_PKCS11_LIB_OMIT_INITIALIZE:
+ case AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE:
+ break;
+ default:
+ AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "Invalid PKCS#11 behavior arg: %d", options->initialize_finalize_behavior);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ /* Create the struct */
+ struct aws_pkcs11_lib *pkcs11_lib = aws_mem_calloc(allocator, 1, sizeof(struct aws_pkcs11_lib));
+ aws_ref_count_init(&pkcs11_lib->ref_count, pkcs11_lib, s_pkcs11_lib_destroy);
+ pkcs11_lib->allocator = allocator;
+
+ /* Load the library. */
+
+ /* We need a null-terminated string for the next call,
+ * or NULL to search the current application itself for PKCS#11 symbols. */
+ struct aws_string *filename_storage = NULL;
+ const char *filename = NULL;
+ if (options->filename.ptr != NULL) {
+ filename_storage = aws_string_new_from_cursor(allocator, &options->filename);
+ filename = aws_string_c_str(filename_storage);
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_PKCS11,
+ "Loading PKCS#11. file:'%s' C_Initialize:%s",
+ filename ? filename : "<MAIN PROGRAM>",
+ (options->initialize_finalize_behavior == AWS_PKCS11_LIB_OMIT_INITIALIZE) ? "omit" : "yes");
+
+ if (aws_shared_library_init(&pkcs11_lib->shared_lib, filename)) {
+ goto error;
+ }
+
+ /* Find C_GetFunctionList() and call it to get the list of pointers to all the other functions */
+ CK_C_GetFunctionList get_function_list = NULL;
+ if (aws_shared_library_find_function(
+ &pkcs11_lib->shared_lib, "C_GetFunctionList", (aws_generic_function *)&get_function_list)) {
+ goto error;
+ }
+
+ CK_RV rv = get_function_list(&pkcs11_lib->function_list);
+ if (rv != CKR_OK) {
+ s_raise_ck_error(pkcs11_lib, "C_GetFunctionList", rv);
+ goto error;
+ }
+
+ /* Check function list's API version */
+ CK_VERSION version = pkcs11_lib->function_list->version;
+ if ((version.major != AWS_SUPPORTED_CRYPTOKI_VERSION_MAJOR) ||
+ (version.minor < AWS_MIN_SUPPORTED_CRYPTOKI_VERSION_MINOR)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "id=%p: Library implements PKCS#11 version %" PRIu8 ".%" PRIu8 " but %d.%d compatibility is required",
+ (void *)pkcs11_lib,
+ version.major,
+ version.minor,
+ AWS_SUPPORTED_CRYPTOKI_VERSION_MAJOR,
+ AWS_MIN_SUPPORTED_CRYPTOKI_VERSION_MINOR);
+
+ aws_raise_error(AWS_ERROR_PKCS11_VERSION_UNSUPPORTED);
+ goto error;
+ }
+
+ /* Call C_Initialize() */
+ const char *init_logging_str = "omit";
+ if (options->initialize_finalize_behavior != AWS_PKCS11_LIB_OMIT_INITIALIZE) {
+ CK_C_INITIALIZE_ARGS init_args = {
+ /* encourage lib to use our locks */
+ .CreateMutex = s_pkcs11_create_mutex,
+ .DestroyMutex = s_pkcs11_destroy_mutex,
+ .LockMutex = s_pkcs11_lock_mutex,
+ .UnlockMutex = s_pkcs11_unlock_mutex,
+ /* but if it needs to use OS locks instead, that's fine too */
+ .flags = CKF_OS_LOCKING_OK,
+ };
+
+ rv = pkcs11_lib->function_list->C_Initialize(&init_args);
+ if (rv != CKR_OK) {
+ /* Ignore already-initialized errors (unless user wants STRICT behavior) */
+ if (rv != CKR_CRYPTOKI_ALREADY_INITIALIZED ||
+ options->initialize_finalize_behavior == AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE) {
+
+ s_raise_ck_error(pkcs11_lib, "C_Initialize", rv);
+ goto error;
+ }
+ }
+
+ init_logging_str = aws_pkcs11_ckr_str(rv);
+
+ if (options->initialize_finalize_behavior == AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE) {
+ pkcs11_lib->finalize_on_cleanup = true;
+ }
+ }
+
+ /* Get info about the library and log it.
+ * This will be VERY useful for diagnosing user issues. */
+ CK_INFO info;
+ AWS_ZERO_STRUCT(info);
+ rv = pkcs11_lib->function_list->C_GetInfo(&info);
+ if (rv != CKR_OK) {
+ s_raise_ck_error(pkcs11_lib, "C_GetInfo", rv);
+ goto error;
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_IO_PKCS11,
+ "id=%p: PKCS#11 loaded. file:'%s' cryptokiVersion:%" PRIu8 ".%" PRIu8 " manufacturerID:'" PRInSTR
+ "' flags:0x%08lX libraryDescription:'" PRInSTR "' libraryVersion:%" PRIu8 ".%" PRIu8 " C_Initialize:%s",
+ (void *)pkcs11_lib,
+ filename ? filename : "<MAIN PROGRAM>",
+ info.cryptokiVersion.major,
+ info.cryptokiVersion.minor,
+ AWS_BYTE_CURSOR_PRI(s_trim_padding(info.manufacturerID, sizeof(info.manufacturerID))),
+ info.flags,
+ AWS_BYTE_CURSOR_PRI(s_trim_padding(info.libraryDescription, sizeof(info.libraryDescription))),
+ info.libraryVersion.major,
+ info.libraryVersion.minor,
+ init_logging_str);
+
+ /* Success! */
+ goto clean_up;
+
+error:
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "id=%p: Failed to initialize PKCS#11 library from '%s'",
+ (void *)pkcs11_lib,
+ filename ? filename : "<MAIN PROGRAM>");
+
+ aws_pkcs11_lib_release(pkcs11_lib);
+ pkcs11_lib = NULL;
+
+clean_up:
+ aws_string_destroy(filename_storage);
+ return pkcs11_lib;
+}
+
+struct aws_pkcs11_lib *aws_pkcs11_lib_acquire(struct aws_pkcs11_lib *pkcs11_lib) {
+ aws_ref_count_acquire(&pkcs11_lib->ref_count);
+ return pkcs11_lib;
+}
+
+void aws_pkcs11_lib_release(struct aws_pkcs11_lib *pkcs11_lib) {
+ if (pkcs11_lib) {
+ aws_ref_count_release(&pkcs11_lib->ref_count);
+ }
+}
+
+/**
+ * Find the slot that meets all criteria:
+ * - has a token
+ * - if match_slot_id is non-null, then slot IDs must match
+ * - if match_token_label is non-null, then labels must match
+ * The function fails unless it finds exactly one slot meeting all criteria.
+ */
+int aws_pkcs11_lib_find_slot_with_token(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ const uint64_t *match_slot_id,
+ const struct aws_string *match_token_label,
+ CK_SLOT_ID *out_slot_id) {
+
+ CK_SLOT_ID *slot_id_array = NULL; /* array of IDs */
+ CK_SLOT_ID *candidate = NULL; /* points to ID in slot_id_array */
+ CK_TOKEN_INFO info;
+ AWS_ZERO_STRUCT(info);
+ bool success = false;
+
+ /* query number of slots with tokens */
+ CK_ULONG num_slots = 0;
+ CK_RV rv = pkcs11_lib->function_list->C_GetSlotList(CK_TRUE /*tokenPresent*/, NULL /*pSlotList*/, &num_slots);
+ if (rv != CKR_OK) {
+ s_raise_ck_error(pkcs11_lib, "C_GetSlotList", rv);
+ goto clean_up;
+ }
+
+ if (num_slots == 0) {
+ AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "id=%p: No PKCS#11 tokens present in any slot", (void *)pkcs11_lib);
+ aws_raise_error(AWS_ERROR_PKCS11_TOKEN_NOT_FOUND);
+ goto clean_up;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_PKCS11, "id=%p: Found %lu slots with tokens. Picking one...", (void *)pkcs11_lib, num_slots);
+
+ /* allocate space for slot IDs */
+ slot_id_array = aws_mem_calloc(pkcs11_lib->allocator, num_slots, sizeof(CK_SLOT_ID));
+
+ /* query all slot IDs */
+ rv = pkcs11_lib->function_list->C_GetSlotList(CK_TRUE /*tokenPresent*/, slot_id_array, &num_slots);
+ if (rv != CKR_OK) {
+ s_raise_ck_error(pkcs11_lib, "C_GetSlotList", rv);
+ goto clean_up;
+ }
+
+ for (size_t i = 0; i < num_slots; ++i) {
+ CK_SLOT_ID slot_id_i = slot_id_array[i];
+
+ /* if specific slot_id requested, and this isn't it, then skip */
+ if ((match_slot_id != NULL) && (*match_slot_id != slot_id_i)) {
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_PKCS11,
+ "id=%p: Ignoring PKCS#11 token because slot %lu doesn't match %" PRIu64,
+ (void *)pkcs11_lib,
+ slot_id_i,
+ *match_slot_id);
+ continue;
+ }
+
+ /* query token info */
+ CK_TOKEN_INFO token_info_i;
+ AWS_ZERO_STRUCT(token_info_i);
+ rv = pkcs11_lib->function_list->C_GetTokenInfo(slot_id_i, &token_info_i);
+ if (rv != CKR_OK) {
+ s_raise_ck_error(pkcs11_lib, "C_GetTokenInfo", rv);
+ goto clean_up;
+ }
+
+ /* if specific token label requested, and this isn't it, then skip */
+ if (match_token_label != NULL) {
+ struct aws_byte_cursor label_i = s_trim_padding(token_info_i.label, sizeof(token_info_i.label));
+ if (aws_string_eq_byte_cursor(match_token_label, &label_i) == false) {
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_PKCS11,
+ "id=%p: Ignoring PKCS#11 token in slot %lu because label '" PRInSTR "' doesn't match '%s'",
+ (void *)pkcs11_lib,
+ slot_id_i,
+ AWS_BYTE_CURSOR_PRI(label_i),
+ aws_string_c_str(match_token_label));
+ continue;
+ }
+ }
+
+ /* this slot is a candidate! */
+
+ /* be sure there's only one candidate */
+ if (candidate != NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "id=%p: Failed to choose PKCS#11 token, multiple tokens match search criteria",
+ (void *)pkcs11_lib);
+ aws_raise_error(AWS_ERROR_PKCS11_TOKEN_NOT_FOUND);
+ goto clean_up;
+ }
+
+ /* the new candidate! */
+ candidate = &slot_id_array[i];
+ memcpy(&info, &token_info_i, sizeof(CK_TOKEN_INFO));
+ }
+
+ if (candidate == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11, "id=%p: Failed to find PKCS#11 token which matches search criteria", (void *)pkcs11_lib);
+ aws_raise_error(AWS_ERROR_PKCS11_TOKEN_NOT_FOUND);
+ goto clean_up;
+ }
+
+ /* success! */
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_PKCS11,
+ "id=%p: Selected PKCS#11 token. slot:%lu label:'" PRInSTR "' manufacturerID:'" PRInSTR "' model:'" PRInSTR
+ "' serialNumber:'" PRInSTR "' flags:0x%08lX sessionCount:%lu/%lu rwSessionCount:%lu/%lu"
+ " freePublicMemory:%lu/%lu freePrivateMemory:%lu/%lu"
+ " hardwareVersion:%" PRIu8 ".%" PRIu8 " firmwareVersion:%" PRIu8 ".%" PRIu8,
+ (void *)pkcs11_lib,
+ *candidate,
+ AWS_BYTE_CURSOR_PRI(s_trim_padding(info.label, sizeof(info.label))),
+ AWS_BYTE_CURSOR_PRI(s_trim_padding(info.manufacturerID, sizeof(info.manufacturerID))),
+ AWS_BYTE_CURSOR_PRI(s_trim_padding(info.model, sizeof(info.model))),
+ AWS_BYTE_CURSOR_PRI(s_trim_padding(info.serialNumber, sizeof(info.serialNumber))),
+ info.flags,
+ info.ulSessionCount,
+ info.ulMaxSessionCount,
+ info.ulRwSessionCount,
+ info.ulMaxRwSessionCount,
+ info.ulFreePublicMemory,
+ info.ulTotalPublicMemory,
+ info.ulFreePrivateMemory,
+ info.ulTotalPrivateMemory,
+ info.hardwareVersion.major,
+ info.hardwareVersion.minor,
+ info.firmwareVersion.major,
+ info.firmwareVersion.minor);
+
+ *out_slot_id = *candidate;
+ success = true;
+
+clean_up:
+ aws_mem_release(pkcs11_lib->allocator, slot_id_array);
+ return success ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
+
+CK_FUNCTION_LIST *aws_pkcs11_lib_get_function_list(struct aws_pkcs11_lib *pkcs11_lib) {
+ return pkcs11_lib->function_list;
+}
+
+int aws_pkcs11_lib_open_session(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ CK_SLOT_ID slot_id,
+ CK_SESSION_HANDLE *out_session_handle) {
+
+ CK_SESSION_HANDLE session_handle = CK_INVALID_HANDLE;
+ CK_RV rv = pkcs11_lib->function_list->C_OpenSession(
+ slot_id, CKF_SERIAL_SESSION /*flags*/, NULL /*pApplication*/, NULL /*notify*/, &session_handle);
+ if (rv != CKR_OK) {
+ return s_raise_ck_error(pkcs11_lib, "C_OpenSession", rv);
+ }
+
+ /* success! */
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_PKCS11, "id=%p session=%lu: Session opened on slot %lu", (void *)pkcs11_lib, session_handle, slot_id);
+
+ *out_session_handle = session_handle;
+ return AWS_OP_SUCCESS;
+}
+
+void aws_pkcs11_lib_close_session(struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle) {
+ CK_RV rv = pkcs11_lib->function_list->C_CloseSession(session_handle);
+ if (rv == CKR_OK) {
+ AWS_LOGF_DEBUG(AWS_LS_IO_PKCS11, "id=%p session=%lu: Session closed", (void *)pkcs11_lib, session_handle);
+ } else {
+ /* Log the error, but we can't really do anything about it */
+ AWS_LOGF_WARN(
+ AWS_LS_IO_PKCS11,
+ "id=%p session=%lu: Ignoring C_CloseSession() failure. PKCS#11 error: %s (0x%08lX)",
+ (void *)pkcs11_lib,
+ session_handle,
+ aws_pkcs11_ckr_str(rv),
+ rv);
+ }
+}
+
+int aws_pkcs11_lib_login_user(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ CK_SESSION_HANDLE session_handle,
+ const struct aws_string *optional_user_pin) {
+
+ CK_UTF8CHAR_PTR pin = NULL;
+ CK_ULONG pin_len = 0;
+ if (optional_user_pin) {
+ if (optional_user_pin->len > ULONG_MAX) {
+ AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "id=%p session=%lu: PIN is too long", (void *)pkcs11_lib, session_handle);
+ return aws_raise_error(AWS_ERROR_PKCS11_CKR_PIN_INCORRECT);
+ }
+ pin_len = (CK_ULONG)optional_user_pin->len;
+ pin = (CK_UTF8CHAR_PTR)optional_user_pin->bytes;
+ }
+
+ CK_RV rv = pkcs11_lib->function_list->C_Login(session_handle, CKU_USER, pin, pin_len);
+ /* Ignore if we are already logged in; this can happen if the application using the device SDK also logs in to PKCS#11 */
+ if (rv != CKR_OK && rv != CKR_USER_ALREADY_LOGGED_IN) {
+ return s_raise_ck_session_error(pkcs11_lib, "C_Login", session_handle, rv);
+ }
+
+ /* Success! */
+ if (rv == CKR_USER_ALREADY_LOGGED_IN) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_PKCS11, "id=%p session=%lu: User was already logged in", (void *)pkcs11_lib, session_handle);
+ } else {
+ AWS_LOGF_DEBUG(AWS_LS_IO_PKCS11, "id=%p session=%lu: User logged in", (void *)pkcs11_lib, session_handle);
+ }
+ return AWS_OP_SUCCESS;
+}
+
+/**
+ * Find the object that meets all criteria:
+ * - is private key
+ * - if match_label is non-null, then labels must match
+ * The function fails unless it finds exactly one object meeting all criteria.
+ */
+int aws_pkcs11_lib_find_private_key(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ CK_SESSION_HANDLE session_handle,
+ const struct aws_string *match_label,
+ CK_OBJECT_HANDLE *out_key_handle,
+ CK_KEY_TYPE *out_key_type) {
+
+ /* gets set true after everything succeeds */
+ bool success = false;
+
+ /* gets set true after search initialized.
+ * indicates that C_FindObjectsFinal() must be run before function ends */
+ bool must_finalize_search = false;
+
+ /* set up search attributes */
+ CK_OBJECT_CLASS key_class = CKO_PRIVATE_KEY;
+ CK_ULONG num_attributes = 1;
+ CK_ATTRIBUTE attributes[2] = {
+ {
+ .type = CKA_CLASS,
+ .pValue = &key_class,
+ .ulValueLen = sizeof(key_class),
+ },
+ };
+
+ if (match_label != NULL) {
+ if (match_label->len > ULONG_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "id=%p session=%lu: private key label is too long",
+ (void *)pkcs11_lib,
+ session_handle);
+ aws_raise_error(AWS_ERROR_PKCS11_KEY_NOT_FOUND);
+ goto clean_up;
+ }
+
+ CK_ATTRIBUTE *attr = &attributes[num_attributes++];
+ attr->type = CKA_LABEL;
+ attr->pValue = (void *)match_label->bytes;
+ attr->ulValueLen = (CK_ULONG)match_label->len;
+ }
+
+ /* initialize search */
+ CK_RV rv = pkcs11_lib->function_list->C_FindObjectsInit(session_handle, attributes, num_attributes);
+ if (rv != CKR_OK) {
+ s_raise_ck_session_error(pkcs11_lib, "C_FindObjectsInit", session_handle, rv);
+ goto clean_up;
+ }
+
+ must_finalize_search = true;
+
+ /* get search results.
+ * note that we're asking for 2 objects max, so we can fail if we find more than one */
+ CK_OBJECT_HANDLE found_objects[2] = {0};
+ CK_ULONG num_found = 0;
+ rv = pkcs11_lib->function_list->C_FindObjects(session_handle, found_objects, 2 /*max*/, &num_found);
+ if (rv != CKR_OK) {
+ s_raise_ck_session_error(pkcs11_lib, "C_FindObjects", session_handle, rv);
+ goto clean_up;
+ }
+
+ if ((num_found == 0) || (found_objects[0] == CK_INVALID_HANDLE)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "id=%p session=%lu: Failed to find private key on PKCS#11 token which matches search criteria",
+ (void *)pkcs11_lib,
+ session_handle);
+ aws_raise_error(AWS_ERROR_PKCS11_KEY_NOT_FOUND);
+ goto clean_up;
+ }
+ if (num_found > 1) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "id=%p session=%lu: Failed to choose private key, multiple objects on PKCS#11 token match search criteria",
+ (void *)pkcs11_lib,
+ session_handle);
+ aws_raise_error(AWS_ERROR_PKCS11_KEY_NOT_FOUND);
+ goto clean_up;
+ }
+
+ /* key found */
+ CK_OBJECT_HANDLE key_handle = found_objects[0];
+
+ /* query key-type */
+ CK_KEY_TYPE key_type = 0;
+ CK_ATTRIBUTE key_attributes[] = {
+ {
+ .type = CKA_KEY_TYPE,
+ .pValue = &key_type,
+ .ulValueLen = sizeof(key_type),
+ },
+ };
+
+ rv = pkcs11_lib->function_list->C_GetAttributeValue(
+ session_handle, key_handle, key_attributes, AWS_ARRAY_SIZE(key_attributes));
+ if (rv != CKR_OK) {
+ s_raise_ck_session_error(pkcs11_lib, "C_GetAttributeValue", session_handle, rv);
+ goto clean_up;
+ }
+
+ switch (key_type) {
+ case CKK_RSA:
+ case CKK_EC:
+ break;
+ default:
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "id=%p session=%lu: PKCS#11 private key type %s (0x%08lX) is currently unsupported",
+ (void *)pkcs11_lib,
+ session_handle,
+ s_ckk_str(key_type),
+ key_type);
+ aws_raise_error(AWS_ERROR_PKCS11_KEY_TYPE_UNSUPPORTED);
+ goto clean_up;
+ }
+
+ /* Success! */
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_PKCS11,
+ "id=%p session=%lu: Found private key. type=%s",
+ (void *)pkcs11_lib,
+ session_handle,
+ s_ckk_str(key_type));
+ *out_key_handle = key_handle;
+ *out_key_type = key_type;
+ success = true;
+
+clean_up:
+
+ if (must_finalize_search) {
+ rv = pkcs11_lib->function_list->C_FindObjectsFinal(session_handle);
+ /* don't bother reporting error if we were already failing */
+ if ((rv != CKR_OK) && (success == true)) {
+ s_raise_ck_session_error(pkcs11_lib, "C_FindObjectsFinal", session_handle, rv);
+ success = false;
+ }
+ }
+
+ return success ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
+
+int aws_pkcs11_lib_decrypt(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ CK_SESSION_HANDLE session_handle,
+ CK_OBJECT_HANDLE key_handle,
+ CK_KEY_TYPE key_type,
+ struct aws_byte_cursor encrypted_data,
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *out_data) {
+
+ AWS_ASSERT(encrypted_data.len <= ULONG_MAX); /* do real error checking if this becomes a public API */
+ AWS_ASSERT(out_data->allocator == NULL);
+
+ CK_MECHANISM mechanism;
+ AWS_ZERO_STRUCT(mechanism);
+
+ /* Note, CKK_EC is not expected to enter into this code path */
+ switch (key_type) {
+ case CKK_RSA:
+ mechanism.mechanism = CKM_RSA_PKCS;
+ break;
+ default:
+ aws_raise_error(AWS_ERROR_PKCS11_KEY_TYPE_UNSUPPORTED);
+ goto error;
+ }
+
+ /* initialize the decryption operation */
+ CK_RV rv = pkcs11_lib->function_list->C_DecryptInit(session_handle, &mechanism, key_handle);
+ if (rv != CKR_OK) {
+ s_raise_ck_session_error(pkcs11_lib, "C_DecryptInit", session_handle, rv);
+ goto error;
+ }
+
+ /* query needed capacity (finalizes decryption operation if it fails) */
+ CK_ULONG data_len = 0;
+ rv = pkcs11_lib->function_list->C_Decrypt(
+ session_handle, encrypted_data.ptr, (CK_ULONG)encrypted_data.len, NULL /*pData*/, &data_len);
+ if (rv != CKR_OK) {
+ s_raise_ck_session_error(pkcs11_lib, "C_Decrypt", session_handle, rv);
+ goto error;
+ }
+
+ aws_byte_buf_init(out_data, allocator, data_len); /* cannot fail */
+
+ /* do actual decrypt (finalizes decryption operation, whether it succeeds or fails)*/
+ rv = pkcs11_lib->function_list->C_Decrypt(
+ session_handle, encrypted_data.ptr, (CK_ULONG)encrypted_data.len, out_data->buffer, &data_len);
+ if (rv != CKR_OK) {
+ s_raise_ck_session_error(pkcs11_lib, "C_Decrypt", session_handle, rv);
+ goto error;
+ }
+
+ out_data->len = data_len;
+ return AWS_OP_SUCCESS;
+
+error:
+ aws_byte_buf_clean_up(out_data);
+ return AWS_OP_ERR;
+}
+
+/* runs C_Sign(), putting encrypted message into out_signature */
+static int s_pkcs11_sign_helper(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ CK_SESSION_HANDLE session_handle,
+ CK_OBJECT_HANDLE key_handle,
+ CK_MECHANISM mechanism,
+ struct aws_byte_cursor input_data,
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *out_signature) {
+
+ /* initialize signing operation */
+ CK_RV rv = pkcs11_lib->function_list->C_SignInit(session_handle, &mechanism, key_handle);
+ if (rv != CKR_OK) {
+ s_raise_ck_session_error(pkcs11_lib, "C_SignInit", session_handle, rv);
+ goto error;
+ }
+
+ /* query needed capacity (finalizes signing operation if it fails) */
+ CK_ULONG signature_len = 0;
+ rv = pkcs11_lib->function_list->C_Sign(
+ session_handle, input_data.ptr, (CK_ULONG)input_data.len, NULL /*pSignature*/, &signature_len);
+ if (rv != CKR_OK) {
+ s_raise_ck_session_error(pkcs11_lib, "C_Sign", session_handle, rv);
+ goto error;
+ }
+
+ aws_byte_buf_init(out_signature, allocator, signature_len); /* cannot fail */
+
+ /* do actual signing (finalizes signing operation, whether it succeeds or fails) */
+ rv = pkcs11_lib->function_list->C_Sign(
+ session_handle, input_data.ptr, (CK_ULONG)input_data.len, out_signature->buffer, &signature_len);
+ if (rv != CKR_OK) {
+ s_raise_ck_session_error(pkcs11_lib, "C_Sign", session_handle, rv);
+ goto error;
+ }
+
+ out_signature->len = signature_len;
+ return AWS_OP_SUCCESS;
+
+error:
+ aws_byte_buf_clean_up(out_signature);
+ return AWS_OP_ERR;
+}
+
+int aws_get_prefix_to_rsa_sig(enum aws_tls_hash_algorithm digest_alg, struct aws_byte_cursor *out_prefix) {
+ switch (digest_alg) {
+ case AWS_TLS_HASH_SHA1:
+ *out_prefix = aws_byte_cursor_from_array(SHA1_PREFIX_TO_RSA_SIG, sizeof(SHA1_PREFIX_TO_RSA_SIG));
+ break;
+ case AWS_TLS_HASH_SHA224:
+ *out_prefix = aws_byte_cursor_from_array(SHA224_PREFIX_TO_RSA_SIG, sizeof(SHA224_PREFIX_TO_RSA_SIG));
+ break;
+ case AWS_TLS_HASH_SHA256:
+ *out_prefix = aws_byte_cursor_from_array(SHA256_PREFIX_TO_RSA_SIG, sizeof(SHA256_PREFIX_TO_RSA_SIG));
+ break;
+ case AWS_TLS_HASH_SHA384:
+ *out_prefix = aws_byte_cursor_from_array(SHA384_PREFIX_TO_RSA_SIG, sizeof(SHA384_PREFIX_TO_RSA_SIG));
+ break;
+ case AWS_TLS_HASH_SHA512:
+ *out_prefix = aws_byte_cursor_from_array(SHA512_PREFIX_TO_RSA_SIG, sizeof(SHA512_PREFIX_TO_RSA_SIG));
+ break;
+ default:
+ return aws_raise_error(AWS_IO_TLS_DIGEST_ALGORITHM_UNSUPPORTED);
+ }
+ return AWS_OP_SUCCESS;
+}
+
+static int s_pkcs11_sign_rsa(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ CK_SESSION_HANDLE session_handle,
+ CK_OBJECT_HANDLE key_handle,
+ struct aws_byte_cursor digest_data,
+ struct aws_allocator *allocator,
+ enum aws_tls_hash_algorithm digest_alg,
+ enum aws_tls_signature_algorithm signature_alg,
+ struct aws_byte_buf *out_signature) {
+
+ if (signature_alg != AWS_TLS_SIGNATURE_RSA) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "id=%p session=%lu: Signature algorithm '%s' is currently unsupported for PKCS#11 RSA keys. "
+ "Supported algorithms are: RSA",
+ (void *)pkcs11_lib,
+ session_handle,
+ aws_tls_signature_algorithm_str(signature_alg));
+ return aws_raise_error(AWS_IO_TLS_SIGNATURE_ALGORITHM_UNSUPPORTED);
+ }
+
+ struct aws_byte_cursor prefix;
+ if (aws_get_prefix_to_rsa_sig(digest_alg, &prefix)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "id=%p session=%lu: Unsupported digest '%s' for PKCS#11 RSA signing. "
+ "Supported digests are: SHA1, SHA256, SHA384 and SHA512. AWS error: %s",
+ (void *)pkcs11_lib,
+ session_handle,
+ aws_tls_hash_algorithm_str(digest_alg),
+ aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+
+ bool success = false;
+
+ struct aws_byte_buf prefixed_input;
+ aws_byte_buf_init(&prefixed_input, allocator, digest_data.len + prefix.len); /* cannot fail */
+ aws_byte_buf_write_from_whole_cursor(&prefixed_input, prefix);
+ aws_byte_buf_write_from_whole_cursor(&prefixed_input, digest_data);
+
+ /* We could take the original input (rather than the digest) and leverage the CKM_SHA*_RSA_PKCS mechanisms,
+ * but the original input is too large (all the TLS handshake messages up to ClientCertVerify), and we do not
+ * want to perform the digest inside the TPM for performance reasons. Therefore we leverage only the
+ * CKM_RSA_PKCS mechanism and sign *only* the digest in the TPM; signing then just requires prefixing the
+ * input with the DigestInfo header to complete the digest part of RSA signing. */
+ CK_MECHANISM mechanism = {.mechanism = CKM_RSA_PKCS};
+
+ if (s_pkcs11_sign_helper(
+ pkcs11_lib,
+ session_handle,
+ key_handle,
+ mechanism,
+ aws_byte_cursor_from_buf(&prefixed_input),
+ allocator,
+ out_signature)) {
+ goto error;
+ }
+
+ success = true;
+ goto clean_up;
+
+error:
+ aws_byte_buf_clean_up(out_signature);
+clean_up:
+ aws_byte_buf_clean_up(&prefixed_input);
+ return success ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
+
+/*
+ * Basic ASN.1 (DER) encoding of header -- sufficient for ECDSA
+ */
+static int s_asn1_enc_prefix(struct aws_byte_buf *buffer, uint8_t identifier, size_t length) {
+ if (((identifier & 0x1f) == 0x1f) || (length > 0x7f)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "Unable to encode ASN.1 (DER) header 0x%02x %zu", identifier, length);
+ return aws_raise_error(AWS_ERROR_PKCS11_ENCODING_ERROR);
+ }
+ uint8_t head[2];
+ head[0] = identifier;
+ head[1] = (uint8_t)length;
+ if (!aws_byte_buf_write(buffer, head, sizeof(head))) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11, "Insufficient buffer to encode ASN.1 (DER) header 0x%02x %zu", identifier, length);
+ return aws_raise_error(AWS_ERROR_PKCS11_ENCODING_ERROR);
+ }
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Basic ASN.1 (DER) encoding of an unsigned big number -- sufficient for ECDSA. Note that this implementation
+ * may reduce the number of integer bytes down to 1 (removing leading zero bytes), or conversely increase by
+ * one extra byte to ensure the unsigned integer is unambiguously encoded.
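+ * Example: {0x00, 0x9A} trims to {0x9A}; its high bit is set, so it encodes as 02 02 00 9A.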
+ */
+int aws_pkcs11_asn1_enc_ubigint(struct aws_byte_buf *const buffer, struct aws_byte_cursor bigint) {
+
+ // trim off all leading zeros
+ while (bigint.len > 0 && bigint.ptr[0] == 0) {
+ aws_byte_cursor_advance(&bigint, 1);
+ }
+
+ // If the most significant bit is a '1', prefix with a zero-byte to prevent misinterpreting number as negative.
+ // If the big integer value was zero, length will be zero, replace with zero-byte using the same approach.
+ bool add_leading_zero = bigint.len == 0 || (bigint.ptr[0] & 0x80) != 0;
+ size_t actual_len = bigint.len + (add_leading_zero ? 1 : 0);
+
+ // header - indicate integer of given length (including any prefix zero)
+ bool success = s_asn1_enc_prefix(buffer, 0x02, actual_len) == AWS_OP_SUCCESS;
+ if (add_leading_zero) {
+ success = success && aws_byte_buf_write_u8(buffer, 0);
+ }
+ // write rest of number
+ success = success && aws_byte_buf_write_from_whole_cursor(buffer, bigint);
+ if (success) {
+ return AWS_OP_SUCCESS;
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11, "Insufficient buffer to ASN.1 (DER) encode big integer of length %zu", actual_len);
+ return aws_raise_error(AWS_ERROR_PKCS11_ENCODING_ERROR);
+ }
+}
+
+static int s_pkcs11_sign_ecdsa(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ CK_SESSION_HANDLE session_handle,
+ CK_OBJECT_HANDLE key_handle,
+ struct aws_byte_cursor digest_data,
+ struct aws_allocator *allocator,
+ enum aws_tls_signature_algorithm signature_alg,
+ struct aws_byte_buf *out_signature) {
+
+ struct aws_byte_buf part_signature;
+ struct aws_byte_buf r_part;
+ struct aws_byte_buf s_part;
+ AWS_ZERO_STRUCT(part_signature);
+ AWS_ZERO_STRUCT(r_part);
+ AWS_ZERO_STRUCT(s_part);
+
+ if (signature_alg != AWS_TLS_SIGNATURE_ECDSA) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "id=%p session=%lu: Signature algorithm '%s' is currently unsupported for PKCS#11 EC keys. "
+ "Supported algorithms are: ECDSA",
+ (void *)pkcs11_lib,
+ session_handle,
+ aws_tls_signature_algorithm_str(signature_alg));
+ return aws_raise_error(AWS_IO_TLS_SIGNATURE_ALGORITHM_UNSUPPORTED);
+ }
+
+ bool success = false;
+
+ /* An ECDSA signature is the DER encoding of the "r" and "s" parameters. C_Sign returns the two
+ * integers as raw big-endian big numbers, so translation is required.
+ */
+ CK_MECHANISM mechanism = {.mechanism = CKM_ECDSA};
+
+ if (s_pkcs11_sign_helper(
+ pkcs11_lib, session_handle, key_handle, mechanism, digest_data, allocator, &part_signature) !=
+ AWS_OP_SUCCESS) {
+ goto error;
+ }
+
+ /* PKCS11 library returns these parameters as two big unsigned integer numbers of exactly the same length. The
+ * numbers need to be ASN.1/DER encoded (variable length). In addition to the header, space is needed to allow for
+ * an occasional extra 0x00 prefix byte to ensure the integer is encoded and interpreted as unsigned.
+ */
+ if (part_signature.len == 0 || (part_signature.len & 1) != 0) {
+ /* This should never happen; we would fail later anyway, but make it explicit and fail early */
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "PKCS11 library returned an invalid length, unable to interpret ECDSA signature to encode correctly.");
+ aws_raise_error(AWS_ERROR_PKCS11_ENCODING_ERROR);
+ goto error;
+ }
+ size_t num_bytes = part_signature.len / 2;
+ aws_byte_buf_init(&r_part, allocator, num_bytes + 4);
+ aws_byte_buf_init(&s_part, allocator, num_bytes + 4);
+
+ if (aws_pkcs11_asn1_enc_ubigint(&r_part, aws_byte_cursor_from_array(part_signature.buffer, num_bytes)) !=
+ AWS_OP_SUCCESS) {
+ goto error;
+ }
+ if (aws_pkcs11_asn1_enc_ubigint(
+ &s_part, aws_byte_cursor_from_array(part_signature.buffer + num_bytes, num_bytes)) != AWS_OP_SUCCESS) {
+ goto error;
+ }
+ size_t pair_len = r_part.len + s_part.len;
+ aws_byte_buf_init(out_signature, allocator, pair_len + 2); // +2 for the outer SEQUENCE header
+ if (s_asn1_enc_prefix(out_signature, 0x30, pair_len) != AWS_OP_SUCCESS) {
+ goto error;
+ }
+ if (!aws_byte_buf_write_from_whole_buffer(out_signature, r_part)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "Insufficient buffer to ASN.1 (DER) encode ECDSA signature R-part.");
+ aws_raise_error(AWS_ERROR_PKCS11_ENCODING_ERROR);
+ goto error;
+ }
+ if (!aws_byte_buf_write_from_whole_buffer(out_signature, s_part)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "Insufficient buffer to ASN.1 (DER) encode ECDSA signature S-part.");
+ aws_raise_error(AWS_ERROR_PKCS11_ENCODING_ERROR);
+ goto error;
+ }
+ success = true;
+ goto clean_up;
+
+error:
+ aws_byte_buf_clean_up(out_signature);
+clean_up:
+ aws_byte_buf_clean_up(&part_signature);
+ aws_byte_buf_clean_up(&r_part);
+ aws_byte_buf_clean_up(&s_part);
+ return success ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
+
+int aws_pkcs11_lib_sign(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ CK_SESSION_HANDLE session_handle,
+ CK_OBJECT_HANDLE key_handle,
+ CK_KEY_TYPE key_type,
+ struct aws_byte_cursor digest_data,
+ struct aws_allocator *allocator,
+ enum aws_tls_hash_algorithm digest_alg,
+ enum aws_tls_signature_algorithm signature_alg,
+ struct aws_byte_buf *out_signature) {
+
+ AWS_ASSERT(digest_data.len <= ULONG_MAX); /* do real error checking if this becomes a public API */
+ AWS_ASSERT(out_signature->allocator == NULL);
+
+ switch (key_type) {
+ case CKK_RSA:
+ return s_pkcs11_sign_rsa(
+ pkcs11_lib,
+ session_handle,
+ key_handle,
+ digest_data,
+ allocator,
+ digest_alg,
+ signature_alg,
+ out_signature);
+ case CKK_EC:
+ return s_pkcs11_sign_ecdsa(
+ pkcs11_lib,
+ session_handle,
+ key_handle,
+ digest_data,
+ allocator,
+ /* digest_alg is intentionally not passed: ECDSA signs the digest as-is, with no DigestInfo prefix */
+ signature_alg,
+ out_signature);
+ default:
+ return aws_raise_error(AWS_ERROR_PKCS11_KEY_TYPE_UNSUPPORTED);
+ }
+}
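Taken together, the new pkcs11.c API supports the following call sequence. This is a hedged sketch, not part of the diff: the library path, token label, PIN, and the precomputed digest cursor are placeholders, and error/cleanup paths are compressed:

    struct aws_pkcs11_lib_options options = {
        .filename = aws_byte_cursor_from_c_str("/usr/lib/softhsm/libsofthsm2.so"),
        .initialize_finalize_behavior = AWS_PKCS11_LIB_DEFAULT_BEHAVIOR,
    };
    struct aws_pkcs11_lib *lib = aws_pkcs11_lib_new(alloc, &options);

    CK_SLOT_ID slot;
    struct aws_string *label = aws_string_new_from_c_str(alloc, "my-token");
    aws_pkcs11_lib_find_slot_with_token(lib, NULL /*match any slot id*/, label, &slot);

    CK_SESSION_HANDLE session;
    aws_pkcs11_lib_open_session(lib, slot, &session);

    struct aws_string *pin = aws_string_new_from_c_str(alloc, "1234");
    aws_pkcs11_lib_login_user(lib, session, pin);

    CK_OBJECT_HANDLE key;
    CK_KEY_TYPE key_type;
    aws_pkcs11_lib_find_private_key(lib, session, NULL /*match any label*/, &key, &key_type);

    struct aws_byte_buf signature;
    AWS_ZERO_STRUCT(signature); /* aws_pkcs11_lib_sign() asserts the output buffer starts empty */
    aws_pkcs11_lib_sign(
        lib, session, key, key_type, digest, alloc, AWS_TLS_HASH_SHA256, AWS_TLS_SIGNATURE_RSA, &signature);

    aws_byte_buf_clean_up(&signature);
    aws_pkcs11_lib_close_session(lib, session);
    aws_pkcs11_lib_release(lib);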
diff --git a/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11.h b/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11.h
new file mode 100644
index 0000000000..0d78dd7113
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11.h
@@ -0,0 +1,265 @@
+/* Copyright (c) OASIS Open 2016. All Rights Reserved.
+ * Distributed under the terms of the OASIS IPR Policy,
+ * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY
+ * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others.
+ */
+
+/* Latest version of the specification:
+ * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html
+ */
+
+#ifndef _PKCS11_H_
+#define _PKCS11_H_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Before including this file (pkcs11.h) (or pkcs11t.h by
+ * itself), 5 platform-specific macros must be defined. These
+ * macros are described below, and typical definitions for them
+ * are also given. Be advised that these definitions can depend
+ * on both the platform and the compiler used (and possibly also
+ * on whether a Cryptoki library is linked statically or
+ * dynamically).
+ *
+ * In addition to defining these 5 macros, the packing convention
+ * for Cryptoki structures should be set. The Cryptoki
+ * convention on packing is that structures should be 1-byte
+ * aligned.
+ *
+ * If you're using Microsoft Developer Studio 5.0 to produce
+ * Win32 stuff, this might be done by using the following
+ * preprocessor directive before including pkcs11.h or pkcs11t.h:
+ *
+ * #pragma pack(push, cryptoki, 1)
+ *
+ * and using the following preprocessor directive after including
+ * pkcs11.h or pkcs11t.h:
+ *
+ * #pragma pack(pop, cryptoki)
+ *
+ * If you're using an earlier version of Microsoft Developer
+ * Studio to produce Win16 stuff, this might be done by using
+ * the following preprocessor directive before including
+ * pkcs11.h or pkcs11t.h:
+ *
+ * #pragma pack(1)
+ *
+ * In a UNIX environment, you're on your own for this. You might
+ * not need to do (or be able to do!) anything.
+ *
+ *
+ * Now for the macros:
+ *
+ *
+ * 1. CK_PTR: The indirection string for making a pointer to an
+ * object. It can be used like this:
+ *
+ * typedef CK_BYTE CK_PTR CK_BYTE_PTR;
+ *
+ * If you're using Microsoft Developer Studio 5.0 to produce
+ * Win32 stuff, it might be defined by:
+ *
+ * #define CK_PTR *
+ *
+ * If you're using an earlier version of Microsoft Developer
+ * Studio to produce Win16 stuff, it might be defined by:
+ *
+ * #define CK_PTR far *
+ *
+ * In a typical UNIX environment, it might be defined by:
+ *
+ * #define CK_PTR *
+ *
+ *
+ * 2. CK_DECLARE_FUNCTION(returnType, name): A macro which makes
+ * an importable Cryptoki library function declaration out of a
+ * return type and a function name. It should be used in the
+ * following fashion:
+ *
+ * extern CK_DECLARE_FUNCTION(CK_RV, C_Initialize)(
+ * CK_VOID_PTR pReserved
+ * );
+ *
+ * If you're using Microsoft Developer Studio 5.0 to declare a
+ * function in a Win32 Cryptoki .dll, it might be defined by:
+ *
+ * #define CK_DECLARE_FUNCTION(returnType, name) \
+ * returnType __declspec(dllimport) name
+ *
+ * If you're using an earlier version of Microsoft Developer
+ * Studio to declare a function in a Win16 Cryptoki .dll, it
+ * might be defined by:
+ *
+ * #define CK_DECLARE_FUNCTION(returnType, name) \
+ * returnType __export _far _pascal name
+ *
+ * In a UNIX environment, it might be defined by:
+ *
+ * #define CK_DECLARE_FUNCTION(returnType, name) \
+ * returnType name
+ *
+ *
+ * 3. CK_DECLARE_FUNCTION_POINTER(returnType, name): A macro
+ * which makes a Cryptoki API function pointer declaration or
+ * function pointer type declaration out of a return type and a
+ * function name. It should be used in the following fashion:
+ *
+ * // Define funcPtr to be a pointer to a Cryptoki API function
+ * // taking arguments args and returning CK_RV.
+ * CK_DECLARE_FUNCTION_POINTER(CK_RV, funcPtr)(args);
+ *
+ * or
+ *
+ * // Define funcPtrType to be the type of a pointer to a
+ * // Cryptoki API function taking arguments args and returning
+ * // CK_RV, and then define funcPtr to be a variable of type
+ * // funcPtrType.
+ * typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, funcPtrType)(args);
+ * funcPtrType funcPtr;
+ *
+ * If you're using Microsoft Developer Studio 5.0 to access
+ * functions in a Win32 Cryptoki .dll, it might be defined by:
+ *
+ * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \
+ * returnType __declspec(dllimport) (* name)
+ *
+ * If you're using an earlier version of Microsoft Developer
+ * Studio to access functions in a Win16 Cryptoki .dll, it might
+ * be defined by:
+ *
+ * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \
+ * returnType __export _far _pascal (* name)
+ *
+ * In a UNIX environment, it might be defined by:
+ *
+ * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \
+ * returnType (* name)
+ *
+ *
+ * 4. CK_CALLBACK_FUNCTION(returnType, name): A macro which makes
+ * a function pointer type for an application callback out of
+ * a return type for the callback and a name for the callback.
+ * It should be used in the following fashion:
+ *
+ * CK_CALLBACK_FUNCTION(CK_RV, myCallback)(args);
+ *
+ * to declare a function pointer, myCallback, to a callback
+ * which takes arguments args and returns a CK_RV. It can also
+ * be used like this:
+ *
+ * typedef CK_CALLBACK_FUNCTION(CK_RV, myCallbackType)(args);
+ * myCallbackType myCallback;
+ *
+ * If you're using Microsoft Developer Studio 5.0 to do Win32
+ * Cryptoki development, it might be defined by:
+ *
+ * #define CK_CALLBACK_FUNCTION(returnType, name) \
+ * returnType (* name)
+ *
+ * If you're using an earlier version of Microsoft Developer
+ * Studio to do Win16 development, it might be defined by:
+ *
+ * #define CK_CALLBACK_FUNCTION(returnType, name) \
+ * returnType _far _pascal (* name)
+ *
+ * In a UNIX environment, it might be defined by:
+ *
+ * #define CK_CALLBACK_FUNCTION(returnType, name) \
+ * returnType (* name)
+ *
+ *
+ * 5. NULL_PTR: This macro is the value of a NULL pointer.
+ *
+ * In any ANSI/ISO C environment (and in many others as well),
+ * this should best be defined by
+ *
+ * #ifndef NULL_PTR
+ * #define NULL_PTR 0
+ * #endif
+ */
+
+
+/* All the various Cryptoki types and #define'd values are in the
+ * file pkcs11t.h.
+ */
+#include "pkcs11t.h"
+
+#define __PASTE(x,y) x##y
+
+
+/* ==============================================================
+ * Define the "extern" form of all the entry points.
+ * ==============================================================
+ */
+
+#define CK_NEED_ARG_LIST 1
+#define CK_PKCS11_FUNCTION_INFO(name) \
+ extern CK_DECLARE_FUNCTION(CK_RV, name)
+
+/* pkcs11f.h has all the information about the Cryptoki
+ * function prototypes.
+ */
+#include "pkcs11f.h"
+
+#undef CK_NEED_ARG_LIST
+#undef CK_PKCS11_FUNCTION_INFO
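+
+/* For example, with the typical UNIX definition of CK_DECLARE_FUNCTION given
+ * above, the C_Initialize entry of pkcs11f.h expands here to:
+ *
+ *   extern CK_RV C_Initialize(CK_VOID_PTR pInitArgs);
+ */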
+
+
+/* ==============================================================
+ * Define the typedef form of all the entry points. That is, for
+ * each Cryptoki function C_XXX, define a type CK_C_XXX which is
+ * a pointer to that kind of function.
+ * ==============================================================
+ */
+
+#define CK_NEED_ARG_LIST 1
+#define CK_PKCS11_FUNCTION_INFO(name) \
+ typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, __PASTE(CK_,name))
+
+/* pkcs11f.h has all the information about the Cryptoki
+ * function prototypes.
+ */
+#include "pkcs11f.h"
+
+#undef CK_NEED_ARG_LIST
+#undef CK_PKCS11_FUNCTION_INFO
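+
+/* Continuing the example: with the typical UNIX definition of
+ * CK_DECLARE_FUNCTION_POINTER, the same C_Initialize entry now expands to:
+ *
+ *   typedef CK_RV (* CK_C_Initialize)(CK_VOID_PTR pInitArgs);
+ */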
+
+
+/* ==============================================================
+ * Define the structured vector of entry points. A CK_FUNCTION_LIST
+ * contains a CK_VERSION indicating a library's Cryptoki version
+ * and then a whole slew of function pointers to the routines in
+ * the library. This type was declared, but not defined, in
+ * pkcs11t.h.
+ * ==============================================================
+ */
+
+#define CK_PKCS11_FUNCTION_INFO(name) \
+ __PASTE(CK_,name) name;
+
+struct CK_FUNCTION_LIST {
+
+ CK_VERSION version; /* Cryptoki version */
+
+/* Pile all the function pointers into the CK_FUNCTION_LIST. */
+/* pkcs11f.h has all the information about the Cryptoki
+ * function prototypes.
+ */
+#include "pkcs11f.h"
+
+};
+
+#undef CK_PKCS11_FUNCTION_INFO
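+
+/* Inside the struct above (CK_NEED_ARG_LIST is not defined, so the argument
+ * lists in pkcs11f.h are skipped), each entry expands to a member such as:
+ *
+ *   CK_C_Initialize C_Initialize;
+ */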
+
+
+#undef __PASTE
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _PKCS11_H_ */
+
diff --git a/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11f.h b/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11f.h
new file mode 100644
index 0000000000..ed90affc5e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11f.h
@@ -0,0 +1,939 @@
+/* Copyright (c) OASIS Open 2016. All Rights Reserved.
+ * Distributed under the terms of the OASIS IPR Policy,
+ * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY
+ * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others.
+ */
+
+/* Latest version of the specification:
+ * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html
+ */
+
+/* This header file contains pretty much everything about all the
+ * Cryptoki function prototypes. Because this information is
+ * used for more than just declaring function prototypes, the
+ * order of the functions appearing herein is important, and
+ * should not be altered.
+ */
+
+/* General-purpose */
+
+/* C_Initialize initializes the Cryptoki library. */
+CK_PKCS11_FUNCTION_INFO(C_Initialize)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_VOID_PTR pInitArgs /* if this is not NULL_PTR, it gets
+ * cast to CK_C_INITIALIZE_ARGS_PTR
+ * and dereferenced
+ */
+);
+#endif
+
+
+/* C_Finalize indicates that an application is done with the
+ * Cryptoki library.
+ */
+CK_PKCS11_FUNCTION_INFO(C_Finalize)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_VOID_PTR pReserved /* reserved. Should be NULL_PTR */
+);
+#endif
+
+
+/* C_GetInfo returns general information about Cryptoki. */
+CK_PKCS11_FUNCTION_INFO(C_GetInfo)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_INFO_PTR pInfo /* location that receives information */
+);
+#endif
+
+
+/* C_GetFunctionList returns the function list. */
+CK_PKCS11_FUNCTION_INFO(C_GetFunctionList)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_FUNCTION_LIST_PTR_PTR ppFunctionList /* receives pointer to
+ * function list
+ */
+);
+#endif
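+
+/* Illustrative bootstrap of a dynamically loaded Cryptoki library (not part
+ * of this header; dlsym() from <dlfcn.h> and the "handle" variable are
+ * assumptions for the sketch):
+ *
+ *   CK_FUNCTION_LIST_PTR fn_list = NULL_PTR;
+ *   CK_C_GetFunctionList get_fn_list =
+ *       (CK_C_GetFunctionList)dlsym(handle, "C_GetFunctionList");
+ *   if (get_fn_list != NULL_PTR && (*get_fn_list)(&fn_list) == CKR_OK) {
+ *       fn_list->C_Initialize(NULL_PTR);
+ *   }
+ */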
+
+
+
+/* Slot and token management */
+
+/* C_GetSlotList obtains a list of slots in the system. */
+CK_PKCS11_FUNCTION_INFO(C_GetSlotList)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_BBOOL tokenPresent, /* only slots with tokens */
+ CK_SLOT_ID_PTR pSlotList, /* receives array of slot IDs */
+ CK_ULONG_PTR pulCount /* receives number of slots */
+);
+#endif
+
+
+/* C_GetSlotInfo obtains information about a particular slot in
+ * the system.
+ */
+CK_PKCS11_FUNCTION_INFO(C_GetSlotInfo)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SLOT_ID slotID, /* the ID of the slot */
+ CK_SLOT_INFO_PTR pInfo /* receives the slot information */
+);
+#endif
+
+
+/* C_GetTokenInfo obtains information about a particular token
+ * in the system.
+ */
+CK_PKCS11_FUNCTION_INFO(C_GetTokenInfo)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SLOT_ID slotID, /* ID of the token's slot */
+ CK_TOKEN_INFO_PTR pInfo /* receives the token information */
+);
+#endif
+
+
+/* C_GetMechanismList obtains a list of mechanism types
+ * supported by a token.
+ */
+CK_PKCS11_FUNCTION_INFO(C_GetMechanismList)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SLOT_ID slotID, /* ID of token's slot */
+ CK_MECHANISM_TYPE_PTR pMechanismList, /* gets mech. array */
+ CK_ULONG_PTR pulCount /* gets # of mechs. */
+);
+#endif
+
+
+/* C_GetMechanismInfo obtains information about a particular
+ * mechanism possibly supported by a token.
+ */
+CK_PKCS11_FUNCTION_INFO(C_GetMechanismInfo)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SLOT_ID slotID, /* ID of the token's slot */
+ CK_MECHANISM_TYPE type, /* type of mechanism */
+ CK_MECHANISM_INFO_PTR pInfo /* receives mechanism info */
+);
+#endif
+
+
+/* C_InitToken initializes a token. */
+CK_PKCS11_FUNCTION_INFO(C_InitToken)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SLOT_ID slotID, /* ID of the token's slot */
+ CK_UTF8CHAR_PTR pPin, /* the SO's initial PIN */
+ CK_ULONG ulPinLen, /* length in bytes of the PIN */
+ CK_UTF8CHAR_PTR pLabel /* 32-byte token label (blank padded) */
+);
+#endif
+
+
+/* C_InitPIN initializes the normal user's PIN. */
+CK_PKCS11_FUNCTION_INFO(C_InitPIN)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_UTF8CHAR_PTR pPin, /* the normal user's PIN */
+ CK_ULONG ulPinLen /* length in bytes of the PIN */
+);
+#endif
+
+
+/* C_SetPIN modifies the PIN of the user who is logged in. */
+CK_PKCS11_FUNCTION_INFO(C_SetPIN)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_UTF8CHAR_PTR pOldPin, /* the old PIN */
+ CK_ULONG ulOldLen, /* length of the old PIN */
+ CK_UTF8CHAR_PTR pNewPin, /* the new PIN */
+ CK_ULONG ulNewLen /* length of the new PIN */
+);
+#endif
+
+
+
+/* Session management */
+
+/* C_OpenSession opens a session between an application and a
+ * token.
+ */
+CK_PKCS11_FUNCTION_INFO(C_OpenSession)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SLOT_ID slotID, /* the slot's ID */
+ CK_FLAGS flags, /* from CK_SESSION_INFO */
+ CK_VOID_PTR pApplication, /* passed to callback */
+ CK_NOTIFY Notify, /* callback function */
+ CK_SESSION_HANDLE_PTR phSession /* gets session handle */
+);
+#endif
+
+
+/* C_CloseSession closes a session between an application and a
+ * token.
+ */
+CK_PKCS11_FUNCTION_INFO(C_CloseSession)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession /* the session's handle */
+);
+#endif
+
+
+/* C_CloseAllSessions closes all sessions with a token. */
+CK_PKCS11_FUNCTION_INFO(C_CloseAllSessions)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SLOT_ID slotID /* the token's slot */
+);
+#endif
+
+
+/* C_GetSessionInfo obtains information about the session. */
+CK_PKCS11_FUNCTION_INFO(C_GetSessionInfo)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_SESSION_INFO_PTR pInfo /* receives session info */
+);
+#endif
+
+
+/* C_GetOperationState obtains the state of the cryptographic operation
+ * in a session.
+ */
+CK_PKCS11_FUNCTION_INFO(C_GetOperationState)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_BYTE_PTR pOperationState, /* gets state */
+ CK_ULONG_PTR pulOperationStateLen /* gets state length */
+);
+#endif
+
+
+/* C_SetOperationState restores the state of the cryptographic
+ * operation in a session.
+ */
+CK_PKCS11_FUNCTION_INFO(C_SetOperationState)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_BYTE_PTR pOperationState, /* holds state */
+ CK_ULONG ulOperationStateLen, /* holds state length */
+ CK_OBJECT_HANDLE hEncryptionKey, /* en/decryption key */
+ CK_OBJECT_HANDLE hAuthenticationKey /* sign/verify key */
+);
+#endif
+
+
+/* C_Login logs a user into a token. */
+CK_PKCS11_FUNCTION_INFO(C_Login)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_USER_TYPE userType, /* the user type */
+ CK_UTF8CHAR_PTR pPin, /* the user's PIN */
+ CK_ULONG ulPinLen /* the length of the PIN */
+);
+#endif
+
+
+/* C_Logout logs a user out from a token. */
+CK_PKCS11_FUNCTION_INFO(C_Logout)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession /* the session's handle */
+);
+#endif
+
+
+
+/* Object management */
+
+/* C_CreateObject creates a new object. */
+CK_PKCS11_FUNCTION_INFO(C_CreateObject)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_ATTRIBUTE_PTR pTemplate, /* the object's template */
+ CK_ULONG ulCount, /* attributes in template */
+ CK_OBJECT_HANDLE_PTR phObject /* gets new object's handle. */
+);
+#endif
+
+
+/* C_CopyObject copies an object, creating a new object for the
+ * copy.
+ */
+CK_PKCS11_FUNCTION_INFO(C_CopyObject)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_OBJECT_HANDLE hObject, /* the object's handle */
+ CK_ATTRIBUTE_PTR pTemplate, /* template for new object */
+ CK_ULONG ulCount, /* attributes in template */
+ CK_OBJECT_HANDLE_PTR phNewObject /* receives handle of copy */
+);
+#endif
+
+
+/* C_DestroyObject destroys an object. */
+CK_PKCS11_FUNCTION_INFO(C_DestroyObject)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_OBJECT_HANDLE hObject /* the object's handle */
+);
+#endif
+
+
+/* C_GetObjectSize gets the size of an object in bytes. */
+CK_PKCS11_FUNCTION_INFO(C_GetObjectSize)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_OBJECT_HANDLE hObject, /* the object's handle */
+ CK_ULONG_PTR pulSize /* receives size of object */
+);
+#endif
+
+
+/* C_GetAttributeValue obtains the value of one or more object
+ * attributes.
+ */
+CK_PKCS11_FUNCTION_INFO(C_GetAttributeValue)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_OBJECT_HANDLE hObject, /* the object's handle */
+ CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs; gets vals */
+ CK_ULONG ulCount /* attributes in template */
+);
+#endif
+
+
+/* C_SetAttributeValue modifies the value of one or more object
+ * attributes.
+ */
+CK_PKCS11_FUNCTION_INFO(C_SetAttributeValue)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_OBJECT_HANDLE hObject, /* the object's handle */
+ CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs and values */
+ CK_ULONG ulCount /* attributes in template */
+);
+#endif
+
+
+/* C_FindObjectsInit initializes a search for token and session
+ * objects that match a template.
+ */
+CK_PKCS11_FUNCTION_INFO(C_FindObjectsInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_ATTRIBUTE_PTR pTemplate, /* attribute values to match */
+ CK_ULONG ulCount /* attrs in search template */
+);
+#endif
+
+
+/* C_FindObjects continues a search for token and session
+ * objects that match a template, obtaining additional object
+ * handles.
+ */
+CK_PKCS11_FUNCTION_INFO(C_FindObjects)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_OBJECT_HANDLE_PTR phObject, /* gets obj. handles */
+ CK_ULONG ulMaxObjectCount, /* max handles to get */
+ CK_ULONG_PTR pulObjectCount /* actual # returned */
+);
+#endif
+
+
+/* C_FindObjectsFinal finishes a search for token and session
+ * objects.
+ */
+CK_PKCS11_FUNCTION_INFO(C_FindObjectsFinal)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession /* the session's handle */
+);
+#endif
+
+
+
+/* Encryption and decryption */
+
+/* C_EncryptInit initializes an encryption operation. */
+CK_PKCS11_FUNCTION_INFO(C_EncryptInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* the encryption mechanism */
+ CK_OBJECT_HANDLE hKey /* handle of encryption key */
+);
+#endif
+
+
+/* C_Encrypt encrypts single-part data. */
+CK_PKCS11_FUNCTION_INFO(C_Encrypt)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_BYTE_PTR pData, /* the plaintext data */
+ CK_ULONG ulDataLen, /* bytes of plaintext */
+ CK_BYTE_PTR pEncryptedData, /* gets ciphertext */
+ CK_ULONG_PTR pulEncryptedDataLen /* gets c-text size */
+);
+#endif
+
+
+/* C_EncryptUpdate continues a multiple-part encryption
+ * operation.
+ */
+CK_PKCS11_FUNCTION_INFO(C_EncryptUpdate)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_BYTE_PTR pPart, /* the plaintext data */
+ CK_ULONG ulPartLen, /* plaintext data len */
+ CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */
+ CK_ULONG_PTR pulEncryptedPartLen /* gets c-text size */
+);
+#endif
+
+
+/* C_EncryptFinal finishes a multiple-part encryption
+ * operation.
+ */
+CK_PKCS11_FUNCTION_INFO(C_EncryptFinal)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session handle */
+ CK_BYTE_PTR pLastEncryptedPart, /* last c-text */
+ CK_ULONG_PTR pulLastEncryptedPartLen /* gets last size */
+);
+#endif
+
+
+/* C_DecryptInit initializes a decryption operation. */
+CK_PKCS11_FUNCTION_INFO(C_DecryptInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* the decryption mechanism */
+ CK_OBJECT_HANDLE hKey /* handle of decryption key */
+);
+#endif
+
+
+/* C_Decrypt decrypts encrypted data in a single part. */
+CK_PKCS11_FUNCTION_INFO(C_Decrypt)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_BYTE_PTR pEncryptedData, /* ciphertext */
+ CK_ULONG ulEncryptedDataLen, /* ciphertext length */
+ CK_BYTE_PTR pData, /* gets plaintext */
+ CK_ULONG_PTR pulDataLen /* gets p-text size */
+);
+#endif
+
+
+/* C_DecryptUpdate continues a multiple-part decryption
+ * operation.
+ */
+CK_PKCS11_FUNCTION_INFO(C_DecryptUpdate)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_BYTE_PTR pEncryptedPart, /* encrypted data */
+ CK_ULONG ulEncryptedPartLen, /* input length */
+ CK_BYTE_PTR pPart, /* gets plaintext */
+ CK_ULONG_PTR pulPartLen /* p-text size */
+);
+#endif
+
+
+/* C_DecryptFinal finishes a multiple-part decryption
+ * operation.
+ */
+CK_PKCS11_FUNCTION_INFO(C_DecryptFinal)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pLastPart, /* gets plaintext */
+ CK_ULONG_PTR pulLastPartLen /* p-text size */
+);
+#endif
+
+
+
+/* Message digesting */
+
+/* C_DigestInit initializes a message-digesting operation. */
+CK_PKCS11_FUNCTION_INFO(C_DigestInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism /* the digesting mechanism */
+);
+#endif
+
+
+/* C_Digest digests data in a single part. */
+CK_PKCS11_FUNCTION_INFO(C_Digest)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pData, /* data to be digested */
+ CK_ULONG ulDataLen, /* bytes of data to digest */
+ CK_BYTE_PTR pDigest, /* gets the message digest */
+ CK_ULONG_PTR pulDigestLen /* gets digest length */
+);
+#endif
+
+
+/* C_DigestUpdate continues a multiple-part message-digesting
+ * operation.
+ */
+CK_PKCS11_FUNCTION_INFO(C_DigestUpdate)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pPart, /* data to be digested */
+ CK_ULONG ulPartLen /* bytes of data to be digested */
+);
+#endif
+
+
+/* C_DigestKey continues a multi-part message-digesting
+ * operation, by digesting the value of a secret key as part of
+ * the data already digested.
+ */
+CK_PKCS11_FUNCTION_INFO(C_DigestKey)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_OBJECT_HANDLE hKey /* secret key to digest */
+);
+#endif
+
+
+/* C_DigestFinal finishes a multiple-part message-digesting
+ * operation.
+ */
+CK_PKCS11_FUNCTION_INFO(C_DigestFinal)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pDigest, /* gets the message digest */
+ CK_ULONG_PTR pulDigestLen /* gets byte count of digest */
+);
+#endif
+
+
+
+/* Signing and MACing */
+
+/* C_SignInit initializes a signature (private key encryption)
+ * operation, where the signature is (will be) an appendix to
+ * the data, and plaintext cannot be recovered from the
+ * signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_SignInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* the signature mechanism */
+ CK_OBJECT_HANDLE hKey /* handle of signature key */
+);
+#endif
+
+
+/* C_Sign signs (encrypts with private key) data in a single
+ * part, where the signature is (will be) an appendix to the
+ * data, and plaintext cannot be recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_Sign)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pData, /* the data to sign */
+ CK_ULONG ulDataLen, /* count of bytes to sign */
+ CK_BYTE_PTR pSignature, /* gets the signature */
+ CK_ULONG_PTR pulSignatureLen /* gets signature length */
+);
+#endif
+
+
+/* C_SignUpdate continues a multiple-part signature operation,
+ * where the signature is (will be) an appendix to the data,
+ * and plaintext cannot be recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_SignUpdate)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pPart, /* the data to sign */
+ CK_ULONG ulPartLen /* count of bytes to sign */
+);
+#endif
+
+
+/* C_SignFinal finishes a multiple-part signature operation,
+ * returning the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_SignFinal)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pSignature, /* gets the signature */
+ CK_ULONG_PTR pulSignatureLen /* gets signature length */
+);
+#endif
+
+
+/* C_SignRecoverInit initializes a signature operation, where
+ * the data can be recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_SignRecoverInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* the signature mechanism */
+ CK_OBJECT_HANDLE hKey /* handle of the signature key */
+);
+#endif
+
+
+/* C_SignRecover signs data in a single operation, where the
+ * data can be recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_SignRecover)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pData, /* the data to sign */
+ CK_ULONG ulDataLen, /* count of bytes to sign */
+ CK_BYTE_PTR pSignature, /* gets the signature */
+ CK_ULONG_PTR pulSignatureLen /* gets signature length */
+);
+#endif
+
+
+
+/* Verifying signatures and MACs */
+
+/* C_VerifyInit initializes a verification operation, where the
+ * signature is an appendix to the data, and plaintext cannot
+ * be recovered from the signature (e.g. DSA).
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* the verification mechanism */
+ CK_OBJECT_HANDLE hKey /* verification key */
+);
+#endif
+
+
+/* C_Verify verifies a signature in a single-part operation,
+ * where the signature is an appendix to the data, and plaintext
+ * cannot be recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_Verify)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pData, /* signed data */
+ CK_ULONG ulDataLen, /* length of signed data */
+ CK_BYTE_PTR pSignature, /* signature */
+ CK_ULONG ulSignatureLen /* signature length */
+);
+#endif
+
+
+/* C_VerifyUpdate continues a multiple-part verification
+ * operation, where the signature is an appendix to the data,
+ * and plaintext cannot be recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyUpdate)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pPart, /* signed data */
+ CK_ULONG ulPartLen /* length of signed data */
+);
+#endif
+
+
+/* C_VerifyFinal finishes a multiple-part verification
+ * operation, checking the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyFinal)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pSignature, /* signature to verify */
+ CK_ULONG ulSignatureLen /* signature length */
+);
+#endif
+
+
+/* C_VerifyRecoverInit initializes a signature verification
+ * operation, where the data is recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyRecoverInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* the verification mechanism */
+ CK_OBJECT_HANDLE hKey /* verification key */
+);
+#endif
+
+
+/* C_VerifyRecover verifies a signature in a single-part
+ * operation, where the data is recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyRecover)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pSignature, /* signature to verify */
+ CK_ULONG ulSignatureLen, /* signature length */
+ CK_BYTE_PTR pData, /* gets signed data */
+ CK_ULONG_PTR pulDataLen /* gets signed data len */
+);
+#endif
+
+
+
+/* Dual-function cryptographic operations */
+
+/* C_DigestEncryptUpdate continues a multiple-part digesting
+ * and encryption operation.
+ */
+CK_PKCS11_FUNCTION_INFO(C_DigestEncryptUpdate)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_BYTE_PTR pPart, /* the plaintext data */
+ CK_ULONG ulPartLen, /* plaintext length */
+ CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */
+ CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */
+);
+#endif
+
+
+/* C_DecryptDigestUpdate continues a multiple-part decryption and
+ * digesting operation.
+ */
+CK_PKCS11_FUNCTION_INFO(C_DecryptDigestUpdate)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_BYTE_PTR pEncryptedPart, /* ciphertext */
+ CK_ULONG ulEncryptedPartLen, /* ciphertext length */
+ CK_BYTE_PTR pPart, /* gets plaintext */
+ CK_ULONG_PTR pulPartLen /* gets plaintext len */
+);
+#endif
+
+
+/* C_SignEncryptUpdate continues a multiple-part signing and
+ * encryption operation.
+ */
+CK_PKCS11_FUNCTION_INFO(C_SignEncryptUpdate)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_BYTE_PTR pPart, /* the plaintext data */
+ CK_ULONG ulPartLen, /* plaintext length */
+ CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */
+ CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */
+);
+#endif
+
+
+/* C_DecryptVerifyUpdate continues a multiple-part decryption and
+ * verify operation.
+ */
+CK_PKCS11_FUNCTION_INFO(C_DecryptVerifyUpdate)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_BYTE_PTR pEncryptedPart, /* ciphertext */
+ CK_ULONG ulEncryptedPartLen, /* ciphertext length */
+ CK_BYTE_PTR pPart, /* gets plaintext */
+ CK_ULONG_PTR pulPartLen /* gets p-text length */
+);
+#endif
+
+
+
+/* Key management */
+
+/* C_GenerateKey generates a secret key, creating a new key
+ * object.
+ */
+CK_PKCS11_FUNCTION_INFO(C_GenerateKey)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* key generation mech. */
+ CK_ATTRIBUTE_PTR pTemplate, /* template for new key */
+ CK_ULONG ulCount, /* # of attrs in template */
+ CK_OBJECT_HANDLE_PTR phKey /* gets handle of new key */
+);
+#endif
+
+
+/* C_GenerateKeyPair generates a public-key/private-key pair,
+ * creating new key objects.
+ */
+CK_PKCS11_FUNCTION_INFO(C_GenerateKeyPair)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session handle */
+ CK_MECHANISM_PTR pMechanism, /* key-gen mech. */
+ CK_ATTRIBUTE_PTR pPublicKeyTemplate, /* template for pub. key */
+ CK_ULONG ulPublicKeyAttributeCount, /* # pub. attrs. */
+ CK_ATTRIBUTE_PTR pPrivateKeyTemplate, /* template for priv. key */
+ CK_ULONG ulPrivateKeyAttributeCount, /* # priv. attrs. */
+ CK_OBJECT_HANDLE_PTR phPublicKey, /* gets pub. key handle */
+ CK_OBJECT_HANDLE_PTR phPrivateKey /* gets priv. key handle */
+);
+#endif
+
+
+/* C_WrapKey wraps (i.e., encrypts) a key. */
+CK_PKCS11_FUNCTION_INFO(C_WrapKey)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* the wrapping mechanism */
+ CK_OBJECT_HANDLE hWrappingKey, /* wrapping key */
+ CK_OBJECT_HANDLE hKey, /* key to be wrapped */
+ CK_BYTE_PTR pWrappedKey, /* gets wrapped key */
+ CK_ULONG_PTR pulWrappedKeyLen /* gets wrapped key size */
+);
+#endif
+
+
+/* C_UnwrapKey unwraps (decrypts) a wrapped key, creating a new
+ * key object.
+ */
+CK_PKCS11_FUNCTION_INFO(C_UnwrapKey)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_MECHANISM_PTR pMechanism, /* unwrapping mech. */
+ CK_OBJECT_HANDLE hUnwrappingKey, /* unwrapping key */
+ CK_BYTE_PTR pWrappedKey, /* the wrapped key */
+ CK_ULONG ulWrappedKeyLen, /* wrapped key len */
+ CK_ATTRIBUTE_PTR pTemplate, /* new key template */
+ CK_ULONG ulAttributeCount, /* template length */
+ CK_OBJECT_HANDLE_PTR phKey /* gets new handle */
+);
+#endif
+
+
+/* C_DeriveKey derives a key from a base key, creating a new key
+ * object.
+ */
+CK_PKCS11_FUNCTION_INFO(C_DeriveKey)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_MECHANISM_PTR pMechanism, /* key deriv. mech. */
+ CK_OBJECT_HANDLE hBaseKey, /* base key */
+ CK_ATTRIBUTE_PTR pTemplate, /* new key template */
+ CK_ULONG ulAttributeCount, /* template length */
+ CK_OBJECT_HANDLE_PTR phKey /* gets new handle */
+);
+#endif
+
+
+
+/* Random number generation */
+
+/* C_SeedRandom mixes additional seed material into the token's
+ * random number generator.
+ */
+CK_PKCS11_FUNCTION_INFO(C_SeedRandom)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pSeed, /* the seed material */
+ CK_ULONG ulSeedLen /* length of seed material */
+);
+#endif
+
+
+/* C_GenerateRandom generates random data. */
+CK_PKCS11_FUNCTION_INFO(C_GenerateRandom)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR RandomData, /* receives the random data */
+ CK_ULONG ulRandomLen /* # of bytes to generate */
+);
+#endif
+
+
+
+/* Parallel function management */
+
+/* C_GetFunctionStatus is a legacy function; it obtains an
+ * updated status of a function running in parallel with an
+ * application.
+ */
+CK_PKCS11_FUNCTION_INFO(C_GetFunctionStatus)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession /* the session's handle */
+);
+#endif
+
+
+/* C_CancelFunction is a legacy function; it cancels a function
+ * running in parallel.
+ */
+CK_PKCS11_FUNCTION_INFO(C_CancelFunction)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession /* the session's handle */
+);
+#endif
+
+
+/* C_WaitForSlotEvent waits for a slot event (token insertion,
+ * removal, etc.) to occur.
+ */
+CK_PKCS11_FUNCTION_INFO(C_WaitForSlotEvent)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_FLAGS flags, /* blocking/nonblocking flag */
+ CK_SLOT_ID_PTR pSlot, /* location that receives the slot ID */
+ CK_VOID_PTR pReserved /* reserved. Should be NULL_PTR */
+);
+#endif
+
diff --git a/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11t.h b/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11t.h
new file mode 100644
index 0000000000..c13e67cf55
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-io/source/pkcs11/v2.40/pkcs11t.h
@@ -0,0 +1,2003 @@
+/* Copyright (c) OASIS Open 2016. All Rights Reserved.
+ * Distributed under the terms of the OASIS IPR Policy,
+ * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY
+ * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others.
+ */
+
+/* Latest version of the specification:
+ * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html
+ */
+
+/* See top of pkcs11.h for information about the macros that
+ * must be defined and the structure-packing conventions that
+ * must be set before including this file.
+ */
+
+#ifndef _PKCS11T_H_
+#define _PKCS11T_H_ 1
+
+#define CRYPTOKI_VERSION_MAJOR 2
+#define CRYPTOKI_VERSION_MINOR 40
+#define CRYPTOKI_VERSION_AMENDMENT 0
+
+#define CK_TRUE 1
+#define CK_FALSE 0
+
+#ifndef CK_DISABLE_TRUE_FALSE
+#ifndef FALSE
+#define FALSE CK_FALSE
+#endif
+#ifndef TRUE
+#define TRUE CK_TRUE
+#endif
+#endif
+
+/* an unsigned 8-bit value */
+typedef unsigned char CK_BYTE;
+
+/* an unsigned 8-bit character */
+typedef CK_BYTE CK_CHAR;
+
+/* an 8-bit UTF-8 character */
+typedef CK_BYTE CK_UTF8CHAR;
+
+/* a BYTE-sized Boolean flag */
+typedef CK_BYTE CK_BBOOL;
+
+/* an unsigned value, at least 32 bits long */
+typedef unsigned long int CK_ULONG;
+
+/* a signed value, the same size as a CK_ULONG */
+typedef long int CK_LONG;
+
+/* at least 32 bits; each bit is a Boolean flag */
+typedef CK_ULONG CK_FLAGS;
+
+
+/* some special values for certain CK_ULONG variables */
+#define CK_UNAVAILABLE_INFORMATION (~0UL)
+#define CK_EFFECTIVELY_INFINITE 0UL
+
+
+typedef CK_BYTE CK_PTR CK_BYTE_PTR;
+typedef CK_CHAR CK_PTR CK_CHAR_PTR;
+typedef CK_UTF8CHAR CK_PTR CK_UTF8CHAR_PTR;
+typedef CK_ULONG CK_PTR CK_ULONG_PTR;
+typedef void CK_PTR CK_VOID_PTR;
+
+/* Pointer to a CK_VOID_PTR-- i.e., pointer to pointer to void */
+typedef CK_VOID_PTR CK_PTR CK_VOID_PTR_PTR;
+
+
+/* The following value is always invalid if used as a session
+ * handle or object handle
+ */
+#define CK_INVALID_HANDLE 0UL
+
+
+typedef struct CK_VERSION {
+ CK_BYTE major; /* integer portion of version number */
+ CK_BYTE minor; /* 1/100ths portion of version number */
+} CK_VERSION;
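+
+/* Example: this header's own Cryptoki version, 2.40, is represented as
+ * { major = 2, minor = 40 } -- see CRYPTOKI_VERSION_MAJOR/MINOR above. */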
+
+typedef CK_VERSION CK_PTR CK_VERSION_PTR;
+
+
+typedef struct CK_INFO {
+ CK_VERSION cryptokiVersion; /* Cryptoki interface ver */
+ CK_UTF8CHAR manufacturerID[32]; /* blank padded */
+ CK_FLAGS flags; /* must be zero */
+ CK_UTF8CHAR libraryDescription[32]; /* blank padded */
+ CK_VERSION libraryVersion; /* version of library */
+} CK_INFO;
+
+typedef CK_INFO CK_PTR CK_INFO_PTR;
+
+
+/* CK_NOTIFICATION enumerates the types of notifications that
+ * Cryptoki provides to an application
+ */
+typedef CK_ULONG CK_NOTIFICATION;
+#define CKN_SURRENDER 0UL
+#define CKN_OTP_CHANGED 1UL
+
+typedef CK_ULONG CK_SLOT_ID;
+
+typedef CK_SLOT_ID CK_PTR CK_SLOT_ID_PTR;
+
+
+/* CK_SLOT_INFO provides information about a slot */
+typedef struct CK_SLOT_INFO {
+ CK_UTF8CHAR slotDescription[64]; /* blank padded */
+ CK_UTF8CHAR manufacturerID[32]; /* blank padded */
+ CK_FLAGS flags;
+
+ CK_VERSION hardwareVersion; /* version of hardware */
+ CK_VERSION firmwareVersion; /* version of firmware */
+} CK_SLOT_INFO;
+
+/* flags: bit flags that provide capabilities of the slot
+ * Bit Flag Mask Meaning
+ */
+#define CKF_TOKEN_PRESENT 0x00000001UL /* a token is there */
+#define CKF_REMOVABLE_DEVICE 0x00000002UL /* removable devices*/
+#define CKF_HW_SLOT 0x00000004UL /* hardware slot */
+
+typedef CK_SLOT_INFO CK_PTR CK_SLOT_INFO_PTR;
+
+
+/* CK_TOKEN_INFO provides information about a token */
+typedef struct CK_TOKEN_INFO {
+ CK_UTF8CHAR label[32]; /* blank padded */
+ CK_UTF8CHAR manufacturerID[32]; /* blank padded */
+ CK_UTF8CHAR model[16]; /* blank padded */
+ CK_CHAR serialNumber[16]; /* blank padded */
+ CK_FLAGS flags; /* see below */
+
+ CK_ULONG ulMaxSessionCount; /* max open sessions */
+ CK_ULONG ulSessionCount; /* sess. now open */
+ CK_ULONG ulMaxRwSessionCount; /* max R/W sessions */
+ CK_ULONG ulRwSessionCount; /* R/W sess. now open */
+ CK_ULONG ulMaxPinLen; /* in bytes */
+ CK_ULONG ulMinPinLen; /* in bytes */
+ CK_ULONG ulTotalPublicMemory; /* in bytes */
+ CK_ULONG ulFreePublicMemory; /* in bytes */
+ CK_ULONG ulTotalPrivateMemory; /* in bytes */
+ CK_ULONG ulFreePrivateMemory; /* in bytes */
+ CK_VERSION hardwareVersion; /* version of hardware */
+ CK_VERSION firmwareVersion; /* version of firmware */
+ CK_CHAR utcTime[16]; /* time */
+} CK_TOKEN_INFO;
+
+/* The flags parameter is defined as follows:
+ * Bit Flag Mask Meaning
+ */
+#define CKF_RNG 0x00000001UL /* has random # generator */
+#define CKF_WRITE_PROTECTED 0x00000002UL /* token is write-protected */
+#define CKF_LOGIN_REQUIRED 0x00000004UL /* user must login */
+#define CKF_USER_PIN_INITIALIZED 0x00000008UL /* normal user's PIN is set */
+
+/* CKF_RESTORE_KEY_NOT_NEEDED. If it is set,
+ * that means that *every* time the state of cryptographic
+ * operations of a session is successfully saved, all keys
+ * needed to continue those operations are stored in the state
+ */
+#define CKF_RESTORE_KEY_NOT_NEEDED 0x00000020UL
+
+/* CKF_CLOCK_ON_TOKEN. If it is set, that means
+ * that the token has some sort of clock. The time on that
+ * clock is returned in the token info structure
+ */
+#define CKF_CLOCK_ON_TOKEN 0x00000040UL
+
+/* CKF_PROTECTED_AUTHENTICATION_PATH. If it is
+ * set, that means that there is some way for the user to login
+ * without sending a PIN through the Cryptoki library itself
+ */
+#define CKF_PROTECTED_AUTHENTICATION_PATH 0x00000100UL
+
+/* CKF_DUAL_CRYPTO_OPERATIONS. If it is true,
+ * that means that a single session with the token can perform
+ * dual simultaneous cryptographic operations (digest and
+ * encrypt; decrypt and digest; sign and encrypt; and decrypt
+ * and sign)
+ */
+#define CKF_DUAL_CRYPTO_OPERATIONS 0x00000200UL
+
+/* CKF_TOKEN_INITIALIZED. If it is true, the
+ * token has been initialized using C_InitToken or an
+ * equivalent mechanism outside the scope of PKCS #11.
+ * Calling C_InitToken when this flag is set will cause
+ * the token to be reinitialized.
+ */
+#define CKF_TOKEN_INITIALIZED 0x00000400UL
+
+/* CKF_SECONDARY_AUTHENTICATION. If it is
+ * true, the token supports secondary authentication for
+ * private key objects.
+ */
+#define CKF_SECONDARY_AUTHENTICATION 0x00000800UL
+
+/* CKF_USER_PIN_COUNT_LOW. If it is true, an
+ * incorrect user login PIN has been entered at least once
+ * since the last successful authentication.
+ */
+#define CKF_USER_PIN_COUNT_LOW 0x00010000UL
+
+/* CKF_USER_PIN_FINAL_TRY. If it is true,
+ * supplying an incorrect user PIN will cause it to become locked.
+ */
+#define CKF_USER_PIN_FINAL_TRY 0x00020000UL
+
+/* CKF_USER_PIN_LOCKED. If it is true, the
+ * user PIN has been locked. User login to the token is not
+ * possible.
+ */
+#define CKF_USER_PIN_LOCKED 0x00040000UL
+
+/* CKF_USER_PIN_TO_BE_CHANGED. If it is true,
+ * the user PIN value is the default value set by token
+ * initialization or manufacturing, or the PIN has been
+ * expired by the card.
+ */
+#define CKF_USER_PIN_TO_BE_CHANGED 0x00080000UL
+
+/* CKF_SO_PIN_COUNT_LOW. If it is true, an
+ * incorrect SO login PIN has been entered at least once since
+ * the last successful authentication.
+ */
+#define CKF_SO_PIN_COUNT_LOW 0x00100000UL
+
+/* CKF_SO_PIN_FINAL_TRY. If it is true,
+ * supplying an incorrect SO PIN will cause it to become locked.
+ */
+#define CKF_SO_PIN_FINAL_TRY 0x00200000UL
+
+/* CKF_SO_PIN_LOCKED. If it is true, the SO
+ * PIN has been locked. SO login to the token is not possible.
+ */
+#define CKF_SO_PIN_LOCKED 0x00400000UL
+
+/* CKF_SO_PIN_TO_BE_CHANGED. If it is true,
+ * the SO PIN value is the default value set by token
+ * initialization or manufacturing, or the PIN has been
+ * expired by the card.
+ */
+#define CKF_SO_PIN_TO_BE_CHANGED 0x00800000UL
+
+#define CKF_ERROR_STATE 0x01000000UL
+
+typedef CK_TOKEN_INFO CK_PTR CK_TOKEN_INFO_PTR;
+
+
+/* CK_SESSION_HANDLE is a Cryptoki-assigned value that
+ * identifies a session
+ */
+typedef CK_ULONG CK_SESSION_HANDLE;
+
+typedef CK_SESSION_HANDLE CK_PTR CK_SESSION_HANDLE_PTR;
+
+
+/* CK_USER_TYPE enumerates the types of Cryptoki users */
+typedef CK_ULONG CK_USER_TYPE;
+/* Security Officer */
+#define CKU_SO 0UL
+/* Normal user */
+#define CKU_USER 1UL
+/* Context specific */
+#define CKU_CONTEXT_SPECIFIC 2UL
+
+/* CK_STATE enumerates the session states */
+typedef CK_ULONG CK_STATE;
+#define CKS_RO_PUBLIC_SESSION 0UL
+#define CKS_RO_USER_FUNCTIONS 1UL
+#define CKS_RW_PUBLIC_SESSION 2UL
+#define CKS_RW_USER_FUNCTIONS 3UL
+#define CKS_RW_SO_FUNCTIONS 4UL
+
+/* CK_SESSION_INFO provides information about a session */
+typedef struct CK_SESSION_INFO {
+ CK_SLOT_ID slotID;
+ CK_STATE state;
+ CK_FLAGS flags; /* see below */
+ CK_ULONG ulDeviceError; /* device-dependent error code */
+} CK_SESSION_INFO;
+
+/* The flags are defined in the following table:
+ * Bit Flag Mask Meaning
+ */
+#define CKF_RW_SESSION 0x00000002UL /* session is r/w */
+#define CKF_SERIAL_SESSION 0x00000004UL /* no parallel */
+
+typedef CK_SESSION_INFO CK_PTR CK_SESSION_INFO_PTR;
+
+
+/* CK_OBJECT_HANDLE is a token-specific identifier for an
+ * object
+ */
+typedef CK_ULONG CK_OBJECT_HANDLE;
+
+typedef CK_OBJECT_HANDLE CK_PTR CK_OBJECT_HANDLE_PTR;
+
+
+/* CK_OBJECT_CLASS is a value that identifies the classes (or
+ * types) of objects that Cryptoki recognizes. It is defined
+ * as follows:
+ */
+typedef CK_ULONG CK_OBJECT_CLASS;
+
+/* The following classes of objects are defined: */
+#define CKO_DATA 0x00000000UL
+#define CKO_CERTIFICATE 0x00000001UL
+#define CKO_PUBLIC_KEY 0x00000002UL
+#define CKO_PRIVATE_KEY 0x00000003UL
+#define CKO_SECRET_KEY 0x00000004UL
+#define CKO_HW_FEATURE 0x00000005UL
+#define CKO_DOMAIN_PARAMETERS 0x00000006UL
+#define CKO_MECHANISM 0x00000007UL
+#define CKO_OTP_KEY 0x00000008UL
+
+#define CKO_VENDOR_DEFINED 0x80000000UL
+
+typedef CK_OBJECT_CLASS CK_PTR CK_OBJECT_CLASS_PTR;
+
+/* CK_HW_FEATURE_TYPE is a value that identifies the hardware feature type
+ * of an object with CK_OBJECT_CLASS equal to CKO_HW_FEATURE.
+ */
+typedef CK_ULONG CK_HW_FEATURE_TYPE;
+
+/* The following hardware feature types are defined */
+#define CKH_MONOTONIC_COUNTER 0x00000001UL
+#define CKH_CLOCK 0x00000002UL
+#define CKH_USER_INTERFACE 0x00000003UL
+#define CKH_VENDOR_DEFINED 0x80000000UL
+
+/* CK_KEY_TYPE is a value that identifies a key type */
+typedef CK_ULONG CK_KEY_TYPE;
+
+/* the following key types are defined: */
+#define CKK_RSA 0x00000000UL
+#define CKK_DSA 0x00000001UL
+#define CKK_DH 0x00000002UL
+#define CKK_ECDSA 0x00000003UL /* Deprecated */
+#define CKK_EC 0x00000003UL
+#define CKK_X9_42_DH 0x00000004UL
+#define CKK_KEA 0x00000005UL
+#define CKK_GENERIC_SECRET 0x00000010UL
+#define CKK_RC2 0x00000011UL
+#define CKK_RC4 0x00000012UL
+#define CKK_DES 0x00000013UL
+#define CKK_DES2 0x00000014UL
+#define CKK_DES3 0x00000015UL
+#define CKK_CAST 0x00000016UL
+#define CKK_CAST3 0x00000017UL
+#define CKK_CAST5 0x00000018UL /* Deprecated */
+#define CKK_CAST128 0x00000018UL
+#define CKK_RC5 0x00000019UL
+#define CKK_IDEA 0x0000001AUL
+#define CKK_SKIPJACK 0x0000001BUL
+#define CKK_BATON 0x0000001CUL
+#define CKK_JUNIPER 0x0000001DUL
+#define CKK_CDMF 0x0000001EUL
+#define CKK_AES 0x0000001FUL
+#define CKK_BLOWFISH 0x00000020UL
+#define CKK_TWOFISH 0x00000021UL
+#define CKK_SECURID 0x00000022UL
+#define CKK_HOTP 0x00000023UL
+#define CKK_ACTI 0x00000024UL
+#define CKK_CAMELLIA 0x00000025UL
+#define CKK_ARIA 0x00000026UL
+
+#define CKK_MD5_HMAC 0x00000027UL
+#define CKK_SHA_1_HMAC 0x00000028UL
+#define CKK_RIPEMD128_HMAC 0x00000029UL
+#define CKK_RIPEMD160_HMAC 0x0000002AUL
+#define CKK_SHA256_HMAC 0x0000002BUL
+#define CKK_SHA384_HMAC 0x0000002CUL
+#define CKK_SHA512_HMAC 0x0000002DUL
+#define CKK_SHA224_HMAC 0x0000002EUL
+
+#define CKK_SEED 0x0000002FUL
+#define CKK_GOSTR3410 0x00000030UL
+#define CKK_GOSTR3411 0x00000031UL
+#define CKK_GOST28147 0x00000032UL
+
+
+
+#define CKK_VENDOR_DEFINED 0x80000000UL
+
+
+/* CK_CERTIFICATE_TYPE is a value that identifies a certificate
+ * type
+ */
+typedef CK_ULONG CK_CERTIFICATE_TYPE;
+
+#define CK_CERTIFICATE_CATEGORY_UNSPECIFIED 0UL
+#define CK_CERTIFICATE_CATEGORY_TOKEN_USER 1UL
+#define CK_CERTIFICATE_CATEGORY_AUTHORITY 2UL
+#define CK_CERTIFICATE_CATEGORY_OTHER_ENTITY 3UL
+
+#define CK_SECURITY_DOMAIN_UNSPECIFIED 0UL
+#define CK_SECURITY_DOMAIN_MANUFACTURER 1UL
+#define CK_SECURITY_DOMAIN_OPERATOR 2UL
+#define CK_SECURITY_DOMAIN_THIRD_PARTY 3UL
+
+
+/* The following certificate types are defined: */
+#define CKC_X_509 0x00000000UL
+#define CKC_X_509_ATTR_CERT 0x00000001UL
+#define CKC_WTLS 0x00000002UL
+#define CKC_VENDOR_DEFINED 0x80000000UL
+
+
+/* CK_ATTRIBUTE_TYPE is a value that identifies an attribute
+ * type
+ */
+typedef CK_ULONG CK_ATTRIBUTE_TYPE;
+
+/* The CKF_ARRAY_ATTRIBUTE flag identifies an attribute which
+ * consists of an array of values.
+ */
+#define CKF_ARRAY_ATTRIBUTE 0x40000000UL
+
+/* The following OTP-related defines relate to the CKA_OTP_FORMAT attribute */
+#define CK_OTP_FORMAT_DECIMAL 0UL
+#define CK_OTP_FORMAT_HEXADECIMAL 1UL
+#define CK_OTP_FORMAT_ALPHANUMERIC 2UL
+#define CK_OTP_FORMAT_BINARY 3UL
+
+/* The following OTP-related defines relate to the CKA_OTP_..._REQUIREMENT
+ * attributes
+ */
+#define CK_OTP_PARAM_IGNORED 0UL
+#define CK_OTP_PARAM_OPTIONAL 1UL
+#define CK_OTP_PARAM_MANDATORY 2UL
+
+/* The following attribute types are defined: */
+#define CKA_CLASS 0x00000000UL
+#define CKA_TOKEN 0x00000001UL
+#define CKA_PRIVATE 0x00000002UL
+#define CKA_LABEL 0x00000003UL
+#define CKA_APPLICATION 0x00000010UL
+#define CKA_VALUE 0x00000011UL
+#define CKA_OBJECT_ID 0x00000012UL
+#define CKA_CERTIFICATE_TYPE 0x00000080UL
+#define CKA_ISSUER 0x00000081UL
+#define CKA_SERIAL_NUMBER 0x00000082UL
+#define CKA_AC_ISSUER 0x00000083UL
+#define CKA_OWNER 0x00000084UL
+#define CKA_ATTR_TYPES 0x00000085UL
+#define CKA_TRUSTED 0x00000086UL
+#define CKA_CERTIFICATE_CATEGORY 0x00000087UL
+#define CKA_JAVA_MIDP_SECURITY_DOMAIN 0x00000088UL
+#define CKA_URL 0x00000089UL
+#define CKA_HASH_OF_SUBJECT_PUBLIC_KEY 0x0000008AUL
+#define CKA_HASH_OF_ISSUER_PUBLIC_KEY 0x0000008BUL
+#define CKA_NAME_HASH_ALGORITHM 0x0000008CUL
+#define CKA_CHECK_VALUE 0x00000090UL
+
+#define CKA_KEY_TYPE 0x00000100UL
+#define CKA_SUBJECT 0x00000101UL
+#define CKA_ID 0x00000102UL
+#define CKA_SENSITIVE 0x00000103UL
+#define CKA_ENCRYPT 0x00000104UL
+#define CKA_DECRYPT 0x00000105UL
+#define CKA_WRAP 0x00000106UL
+#define CKA_UNWRAP 0x00000107UL
+#define CKA_SIGN 0x00000108UL
+#define CKA_SIGN_RECOVER 0x00000109UL
+#define CKA_VERIFY 0x0000010AUL
+#define CKA_VERIFY_RECOVER 0x0000010BUL
+#define CKA_DERIVE 0x0000010CUL
+#define CKA_START_DATE 0x00000110UL
+#define CKA_END_DATE 0x00000111UL
+#define CKA_MODULUS 0x00000120UL
+#define CKA_MODULUS_BITS 0x00000121UL
+#define CKA_PUBLIC_EXPONENT 0x00000122UL
+#define CKA_PRIVATE_EXPONENT 0x00000123UL
+#define CKA_PRIME_1 0x00000124UL
+#define CKA_PRIME_2 0x00000125UL
+#define CKA_EXPONENT_1 0x00000126UL
+#define CKA_EXPONENT_2 0x00000127UL
+#define CKA_COEFFICIENT 0x00000128UL
+#define CKA_PUBLIC_KEY_INFO 0x00000129UL
+#define CKA_PRIME 0x00000130UL
+#define CKA_SUBPRIME 0x00000131UL
+#define CKA_BASE 0x00000132UL
+
+#define CKA_PRIME_BITS 0x00000133UL
+#define CKA_SUBPRIME_BITS 0x00000134UL
+#define CKA_SUB_PRIME_BITS CKA_SUBPRIME_BITS
+
+#define CKA_VALUE_BITS 0x00000160UL
+#define CKA_VALUE_LEN 0x00000161UL
+#define CKA_EXTRACTABLE 0x00000162UL
+#define CKA_LOCAL 0x00000163UL
+#define CKA_NEVER_EXTRACTABLE 0x00000164UL
+#define CKA_ALWAYS_SENSITIVE 0x00000165UL
+#define CKA_KEY_GEN_MECHANISM 0x00000166UL
+
+#define CKA_MODIFIABLE 0x00000170UL
+#define CKA_COPYABLE 0x00000171UL
+
+#define CKA_DESTROYABLE 0x00000172UL
+
+#define CKA_ECDSA_PARAMS 0x00000180UL /* Deprecated */
+#define CKA_EC_PARAMS 0x00000180UL
+
+#define CKA_EC_POINT 0x00000181UL
+
+#define CKA_SECONDARY_AUTH 0x00000200UL /* Deprecated */
+#define CKA_AUTH_PIN_FLAGS 0x00000201UL /* Deprecated */
+
+#define CKA_ALWAYS_AUTHENTICATE 0x00000202UL
+
+#define CKA_WRAP_WITH_TRUSTED 0x00000210UL
+#define CKA_WRAP_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000211UL)
+#define CKA_UNWRAP_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000212UL)
+#define CKA_DERIVE_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000213UL)
+
+#define CKA_OTP_FORMAT 0x00000220UL
+#define CKA_OTP_LENGTH 0x00000221UL
+#define CKA_OTP_TIME_INTERVAL 0x00000222UL
+#define CKA_OTP_USER_FRIENDLY_MODE 0x00000223UL
+#define CKA_OTP_CHALLENGE_REQUIREMENT 0x00000224UL
+#define CKA_OTP_TIME_REQUIREMENT 0x00000225UL
+#define CKA_OTP_COUNTER_REQUIREMENT 0x00000226UL
+#define CKA_OTP_PIN_REQUIREMENT 0x00000227UL
+#define CKA_OTP_COUNTER 0x0000022EUL
+#define CKA_OTP_TIME 0x0000022FUL
+#define CKA_OTP_USER_IDENTIFIER 0x0000022AUL
+#define CKA_OTP_SERVICE_IDENTIFIER 0x0000022BUL
+#define CKA_OTP_SERVICE_LOGO 0x0000022CUL
+#define CKA_OTP_SERVICE_LOGO_TYPE 0x0000022DUL
+
+#define CKA_GOSTR3410_PARAMS 0x00000250UL
+#define CKA_GOSTR3411_PARAMS 0x00000251UL
+#define CKA_GOST28147_PARAMS 0x00000252UL
+
+#define CKA_HW_FEATURE_TYPE 0x00000300UL
+#define CKA_RESET_ON_INIT 0x00000301UL
+#define CKA_HAS_RESET 0x00000302UL
+
+#define CKA_PIXEL_X 0x00000400UL
+#define CKA_PIXEL_Y 0x00000401UL
+#define CKA_RESOLUTION 0x00000402UL
+#define CKA_CHAR_ROWS 0x00000403UL
+#define CKA_CHAR_COLUMNS 0x00000404UL
+#define CKA_COLOR 0x00000405UL
+#define CKA_BITS_PER_PIXEL 0x00000406UL
+#define CKA_CHAR_SETS 0x00000480UL
+#define CKA_ENCODING_METHODS 0x00000481UL
+#define CKA_MIME_TYPES 0x00000482UL
+#define CKA_MECHANISM_TYPE 0x00000500UL
+#define CKA_REQUIRED_CMS_ATTRIBUTES 0x00000501UL
+#define CKA_DEFAULT_CMS_ATTRIBUTES 0x00000502UL
+#define CKA_SUPPORTED_CMS_ATTRIBUTES 0x00000503UL
+#define CKA_ALLOWED_MECHANISMS (CKF_ARRAY_ATTRIBUTE|0x00000600UL)
+
+#define CKA_VENDOR_DEFINED 0x80000000UL
+
+/* CK_ATTRIBUTE is a structure that includes the type, length
+ * and value of an attribute
+ */
+typedef struct CK_ATTRIBUTE {
+ CK_ATTRIBUTE_TYPE type;
+ CK_VOID_PTR pValue;
+ CK_ULONG ulValueLen; /* in bytes */
+} CK_ATTRIBUTE;
+
+typedef CK_ATTRIBUTE CK_PTR CK_ATTRIBUTE_PTR;
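+
+/* Example template entry (illustrative): selecting private-key objects by
+ * class, e.g. for use with C_FindObjectsInit:
+ *
+ *   CK_OBJECT_CLASS key_class = CKO_PRIVATE_KEY;
+ *   CK_ATTRIBUTE attr_template[] = {
+ *       {CKA_CLASS, &key_class, sizeof(key_class)},
+ *   };
+ */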
+
+/* CK_DATE is a structure that defines a date */
+typedef struct CK_DATE{
+ CK_CHAR year[4]; /* the year ("1900" - "9999") */
+ CK_CHAR month[2]; /* the month ("01" - "12") */
+ CK_CHAR day[2]; /* the day ("01" - "31") */
+} CK_DATE;
+
+
+/* CK_MECHANISM_TYPE is a value that identifies a mechanism
+ * type
+ */
+typedef CK_ULONG CK_MECHANISM_TYPE;
+
+/* the following mechanism types are defined: */
+#define CKM_RSA_PKCS_KEY_PAIR_GEN 0x00000000UL
+#define CKM_RSA_PKCS 0x00000001UL
+#define CKM_RSA_9796 0x00000002UL
+#define CKM_RSA_X_509 0x00000003UL
+
+#define CKM_MD2_RSA_PKCS 0x00000004UL
+#define CKM_MD5_RSA_PKCS 0x00000005UL
+#define CKM_SHA1_RSA_PKCS 0x00000006UL
+
+#define CKM_RIPEMD128_RSA_PKCS 0x00000007UL
+#define CKM_RIPEMD160_RSA_PKCS 0x00000008UL
+#define CKM_RSA_PKCS_OAEP 0x00000009UL
+
+#define CKM_RSA_X9_31_KEY_PAIR_GEN 0x0000000AUL
+#define CKM_RSA_X9_31 0x0000000BUL
+#define CKM_SHA1_RSA_X9_31 0x0000000CUL
+#define CKM_RSA_PKCS_PSS 0x0000000DUL
+#define CKM_SHA1_RSA_PKCS_PSS 0x0000000EUL
+
+#define CKM_DSA_KEY_PAIR_GEN 0x00000010UL
+#define CKM_DSA 0x00000011UL
+#define CKM_DSA_SHA1 0x00000012UL
+#define CKM_DSA_SHA224 0x00000013UL
+#define CKM_DSA_SHA256 0x00000014UL
+#define CKM_DSA_SHA384 0x00000015UL
+#define CKM_DSA_SHA512 0x00000016UL
+
+#define CKM_DH_PKCS_KEY_PAIR_GEN 0x00000020UL
+#define CKM_DH_PKCS_DERIVE 0x00000021UL
+
+#define CKM_X9_42_DH_KEY_PAIR_GEN 0x00000030UL
+#define CKM_X9_42_DH_DERIVE 0x00000031UL
+#define CKM_X9_42_DH_HYBRID_DERIVE 0x00000032UL
+#define CKM_X9_42_MQV_DERIVE 0x00000033UL
+
+#define CKM_SHA256_RSA_PKCS 0x00000040UL
+#define CKM_SHA384_RSA_PKCS 0x00000041UL
+#define CKM_SHA512_RSA_PKCS 0x00000042UL
+#define CKM_SHA256_RSA_PKCS_PSS 0x00000043UL
+#define CKM_SHA384_RSA_PKCS_PSS 0x00000044UL
+#define CKM_SHA512_RSA_PKCS_PSS 0x00000045UL
+
+#define CKM_SHA224_RSA_PKCS 0x00000046UL
+#define CKM_SHA224_RSA_PKCS_PSS 0x00000047UL
+
+#define CKM_SHA512_224 0x00000048UL
+#define CKM_SHA512_224_HMAC 0x00000049UL
+#define CKM_SHA512_224_HMAC_GENERAL 0x0000004AUL
+#define CKM_SHA512_224_KEY_DERIVATION 0x0000004BUL
+#define CKM_SHA512_256 0x0000004CUL
+#define CKM_SHA512_256_HMAC 0x0000004DUL
+#define CKM_SHA512_256_HMAC_GENERAL 0x0000004EUL
+#define CKM_SHA512_256_KEY_DERIVATION 0x0000004FUL
+
+#define CKM_SHA512_T 0x00000050UL
+#define CKM_SHA512_T_HMAC 0x00000051UL
+#define CKM_SHA512_T_HMAC_GENERAL 0x00000052UL
+#define CKM_SHA512_T_KEY_DERIVATION 0x00000053UL
+
+#define CKM_RC2_KEY_GEN 0x00000100UL
+#define CKM_RC2_ECB 0x00000101UL
+#define CKM_RC2_CBC 0x00000102UL
+#define CKM_RC2_MAC 0x00000103UL
+
+#define CKM_RC2_MAC_GENERAL 0x00000104UL
+#define CKM_RC2_CBC_PAD 0x00000105UL
+
+#define CKM_RC4_KEY_GEN 0x00000110UL
+#define CKM_RC4 0x00000111UL
+#define CKM_DES_KEY_GEN 0x00000120UL
+#define CKM_DES_ECB 0x00000121UL
+#define CKM_DES_CBC 0x00000122UL
+#define CKM_DES_MAC 0x00000123UL
+
+#define CKM_DES_MAC_GENERAL 0x00000124UL
+#define CKM_DES_CBC_PAD 0x00000125UL
+
+#define CKM_DES2_KEY_GEN 0x00000130UL
+#define CKM_DES3_KEY_GEN 0x00000131UL
+#define CKM_DES3_ECB 0x00000132UL
+#define CKM_DES3_CBC 0x00000133UL
+#define CKM_DES3_MAC 0x00000134UL
+
+#define CKM_DES3_MAC_GENERAL 0x00000135UL
+#define CKM_DES3_CBC_PAD 0x00000136UL
+#define CKM_DES3_CMAC_GENERAL 0x00000137UL
+#define CKM_DES3_CMAC 0x00000138UL
+#define CKM_CDMF_KEY_GEN 0x00000140UL
+#define CKM_CDMF_ECB 0x00000141UL
+#define CKM_CDMF_CBC 0x00000142UL
+#define CKM_CDMF_MAC 0x00000143UL
+#define CKM_CDMF_MAC_GENERAL 0x00000144UL
+#define CKM_CDMF_CBC_PAD 0x00000145UL
+
+#define CKM_DES_OFB64 0x00000150UL
+#define CKM_DES_OFB8 0x00000151UL
+#define CKM_DES_CFB64 0x00000152UL
+#define CKM_DES_CFB8 0x00000153UL
+
+#define CKM_MD2 0x00000200UL
+
+#define CKM_MD2_HMAC 0x00000201UL
+#define CKM_MD2_HMAC_GENERAL 0x00000202UL
+
+#define CKM_MD5 0x00000210UL
+
+#define CKM_MD5_HMAC 0x00000211UL
+#define CKM_MD5_HMAC_GENERAL 0x00000212UL
+
+#define CKM_SHA_1 0x00000220UL
+
+#define CKM_SHA_1_HMAC 0x00000221UL
+#define CKM_SHA_1_HMAC_GENERAL 0x00000222UL
+
+#define CKM_RIPEMD128 0x00000230UL
+#define CKM_RIPEMD128_HMAC 0x00000231UL
+#define CKM_RIPEMD128_HMAC_GENERAL 0x00000232UL
+#define CKM_RIPEMD160 0x00000240UL
+#define CKM_RIPEMD160_HMAC 0x00000241UL
+#define CKM_RIPEMD160_HMAC_GENERAL 0x00000242UL
+
+#define CKM_SHA256 0x00000250UL
+#define CKM_SHA256_HMAC 0x00000251UL
+#define CKM_SHA256_HMAC_GENERAL 0x00000252UL
+#define CKM_SHA224 0x00000255UL
+#define CKM_SHA224_HMAC 0x00000256UL
+#define CKM_SHA224_HMAC_GENERAL 0x00000257UL
+#define CKM_SHA384 0x00000260UL
+#define CKM_SHA384_HMAC 0x00000261UL
+#define CKM_SHA384_HMAC_GENERAL 0x00000262UL
+#define CKM_SHA512 0x00000270UL
+#define CKM_SHA512_HMAC 0x00000271UL
+#define CKM_SHA512_HMAC_GENERAL 0x00000272UL
+#define CKM_SECURID_KEY_GEN 0x00000280UL
+#define CKM_SECURID 0x00000282UL
+#define CKM_HOTP_KEY_GEN 0x00000290UL
+#define CKM_HOTP 0x00000291UL
+#define CKM_ACTI 0x000002A0UL
+#define CKM_ACTI_KEY_GEN 0x000002A1UL
+
+#define CKM_CAST_KEY_GEN 0x00000300UL
+#define CKM_CAST_ECB 0x00000301UL
+#define CKM_CAST_CBC 0x00000302UL
+#define CKM_CAST_MAC 0x00000303UL
+#define CKM_CAST_MAC_GENERAL 0x00000304UL
+#define CKM_CAST_CBC_PAD 0x00000305UL
+#define CKM_CAST3_KEY_GEN 0x00000310UL
+#define CKM_CAST3_ECB 0x00000311UL
+#define CKM_CAST3_CBC 0x00000312UL
+#define CKM_CAST3_MAC 0x00000313UL
+#define CKM_CAST3_MAC_GENERAL 0x00000314UL
+#define CKM_CAST3_CBC_PAD 0x00000315UL
+/* Note that CAST128 and CAST5 are the same algorithm */
+#define CKM_CAST5_KEY_GEN 0x00000320UL
+#define CKM_CAST128_KEY_GEN 0x00000320UL
+#define CKM_CAST5_ECB 0x00000321UL
+#define CKM_CAST128_ECB 0x00000321UL
+#define CKM_CAST5_CBC 0x00000322UL /* Deprecated */
+#define CKM_CAST128_CBC 0x00000322UL
+#define CKM_CAST5_MAC 0x00000323UL /* Deprecated */
+#define CKM_CAST128_MAC 0x00000323UL
+#define CKM_CAST5_MAC_GENERAL 0x00000324UL /* Deprecated */
+#define CKM_CAST128_MAC_GENERAL 0x00000324UL
+#define CKM_CAST5_CBC_PAD 0x00000325UL /* Deprecated */
+#define CKM_CAST128_CBC_PAD 0x00000325UL
+#define CKM_RC5_KEY_GEN 0x00000330UL
+#define CKM_RC5_ECB 0x00000331UL
+#define CKM_RC5_CBC 0x00000332UL
+#define CKM_RC5_MAC 0x00000333UL
+#define CKM_RC5_MAC_GENERAL 0x00000334UL
+#define CKM_RC5_CBC_PAD 0x00000335UL
+#define CKM_IDEA_KEY_GEN 0x00000340UL
+#define CKM_IDEA_ECB 0x00000341UL
+#define CKM_IDEA_CBC 0x00000342UL
+#define CKM_IDEA_MAC 0x00000343UL
+#define CKM_IDEA_MAC_GENERAL 0x00000344UL
+#define CKM_IDEA_CBC_PAD 0x00000345UL
+#define CKM_GENERIC_SECRET_KEY_GEN 0x00000350UL
+#define CKM_CONCATENATE_BASE_AND_KEY 0x00000360UL
+#define CKM_CONCATENATE_BASE_AND_DATA 0x00000362UL
+#define CKM_CONCATENATE_DATA_AND_BASE 0x00000363UL
+#define CKM_XOR_BASE_AND_DATA 0x00000364UL
+#define CKM_EXTRACT_KEY_FROM_KEY 0x00000365UL
+#define CKM_SSL3_PRE_MASTER_KEY_GEN 0x00000370UL
+#define CKM_SSL3_MASTER_KEY_DERIVE 0x00000371UL
+#define CKM_SSL3_KEY_AND_MAC_DERIVE 0x00000372UL
+
+#define CKM_SSL3_MASTER_KEY_DERIVE_DH 0x00000373UL
+#define CKM_TLS_PRE_MASTER_KEY_GEN 0x00000374UL
+#define CKM_TLS_MASTER_KEY_DERIVE 0x00000375UL
+#define CKM_TLS_KEY_AND_MAC_DERIVE 0x00000376UL
+#define CKM_TLS_MASTER_KEY_DERIVE_DH 0x00000377UL
+
+#define CKM_TLS_PRF 0x00000378UL
+
+#define CKM_SSL3_MD5_MAC 0x00000380UL
+#define CKM_SSL3_SHA1_MAC 0x00000381UL
+#define CKM_MD5_KEY_DERIVATION 0x00000390UL
+#define CKM_MD2_KEY_DERIVATION 0x00000391UL
+#define CKM_SHA1_KEY_DERIVATION 0x00000392UL
+
+#define CKM_SHA256_KEY_DERIVATION 0x00000393UL
+#define CKM_SHA384_KEY_DERIVATION 0x00000394UL
+#define CKM_SHA512_KEY_DERIVATION 0x00000395UL
+#define CKM_SHA224_KEY_DERIVATION 0x00000396UL
+
+#define CKM_PBE_MD2_DES_CBC 0x000003A0UL
+#define CKM_PBE_MD5_DES_CBC 0x000003A1UL
+#define CKM_PBE_MD5_CAST_CBC 0x000003A2UL
+#define CKM_PBE_MD5_CAST3_CBC 0x000003A3UL
+#define CKM_PBE_MD5_CAST5_CBC 0x000003A4UL /* Deprecated */
+#define CKM_PBE_MD5_CAST128_CBC 0x000003A4UL
+#define CKM_PBE_SHA1_CAST5_CBC 0x000003A5UL /* Deprecated */
+#define CKM_PBE_SHA1_CAST128_CBC 0x000003A5UL
+#define CKM_PBE_SHA1_RC4_128 0x000003A6UL
+#define CKM_PBE_SHA1_RC4_40 0x000003A7UL
+#define CKM_PBE_SHA1_DES3_EDE_CBC 0x000003A8UL
+#define CKM_PBE_SHA1_DES2_EDE_CBC 0x000003A9UL
+#define CKM_PBE_SHA1_RC2_128_CBC 0x000003AAUL
+#define CKM_PBE_SHA1_RC2_40_CBC 0x000003ABUL
+
+#define CKM_PKCS5_PBKD2 0x000003B0UL
+
+#define CKM_PBA_SHA1_WITH_SHA1_HMAC 0x000003C0UL
+
+#define CKM_WTLS_PRE_MASTER_KEY_GEN 0x000003D0UL
+#define CKM_WTLS_MASTER_KEY_DERIVE 0x000003D1UL
+#define CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC 0x000003D2UL
+#define CKM_WTLS_PRF 0x000003D3UL
+#define CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE 0x000003D4UL
+#define CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE 0x000003D5UL
+
+#define CKM_TLS10_MAC_SERVER 0x000003D6UL
+#define CKM_TLS10_MAC_CLIENT 0x000003D7UL
+#define CKM_TLS12_MAC 0x000003D8UL
+#define CKM_TLS12_KDF 0x000003D9UL
+#define CKM_TLS12_MASTER_KEY_DERIVE 0x000003E0UL
+#define CKM_TLS12_KEY_AND_MAC_DERIVE 0x000003E1UL
+#define CKM_TLS12_MASTER_KEY_DERIVE_DH 0x000003E2UL
+#define CKM_TLS12_KEY_SAFE_DERIVE 0x000003E3UL
+#define CKM_TLS_MAC 0x000003E4UL
+#define CKM_TLS_KDF 0x000003E5UL
+
+#define CKM_KEY_WRAP_LYNKS 0x00000400UL
+#define CKM_KEY_WRAP_SET_OAEP 0x00000401UL
+
+#define CKM_CMS_SIG 0x00000500UL
+#define CKM_KIP_DERIVE 0x00000510UL
+#define CKM_KIP_WRAP 0x00000511UL
+#define CKM_KIP_MAC 0x00000512UL
+
+#define CKM_CAMELLIA_KEY_GEN 0x00000550UL
+#define CKM_CAMELLIA_ECB 0x00000551UL
+#define CKM_CAMELLIA_CBC 0x00000552UL
+#define CKM_CAMELLIA_MAC 0x00000553UL
+#define CKM_CAMELLIA_MAC_GENERAL 0x00000554UL
+#define CKM_CAMELLIA_CBC_PAD 0x00000555UL
+#define CKM_CAMELLIA_ECB_ENCRYPT_DATA 0x00000556UL
+#define CKM_CAMELLIA_CBC_ENCRYPT_DATA 0x00000557UL
+#define CKM_CAMELLIA_CTR 0x00000558UL
+
+#define CKM_ARIA_KEY_GEN 0x00000560UL
+#define CKM_ARIA_ECB 0x00000561UL
+#define CKM_ARIA_CBC 0x00000562UL
+#define CKM_ARIA_MAC 0x00000563UL
+#define CKM_ARIA_MAC_GENERAL 0x00000564UL
+#define CKM_ARIA_CBC_PAD 0x00000565UL
+#define CKM_ARIA_ECB_ENCRYPT_DATA 0x00000566UL
+#define CKM_ARIA_CBC_ENCRYPT_DATA 0x00000567UL
+
+#define CKM_SEED_KEY_GEN 0x00000650UL
+#define CKM_SEED_ECB 0x00000651UL
+#define CKM_SEED_CBC 0x00000652UL
+#define CKM_SEED_MAC 0x00000653UL
+#define CKM_SEED_MAC_GENERAL 0x00000654UL
+#define CKM_SEED_CBC_PAD 0x00000655UL
+#define CKM_SEED_ECB_ENCRYPT_DATA 0x00000656UL
+#define CKM_SEED_CBC_ENCRYPT_DATA 0x00000657UL
+
+#define CKM_SKIPJACK_KEY_GEN 0x00001000UL
+#define CKM_SKIPJACK_ECB64 0x00001001UL
+#define CKM_SKIPJACK_CBC64 0x00001002UL
+#define CKM_SKIPJACK_OFB64 0x00001003UL
+#define CKM_SKIPJACK_CFB64 0x00001004UL
+#define CKM_SKIPJACK_CFB32 0x00001005UL
+#define CKM_SKIPJACK_CFB16 0x00001006UL
+#define CKM_SKIPJACK_CFB8 0x00001007UL
+#define CKM_SKIPJACK_WRAP 0x00001008UL
+#define CKM_SKIPJACK_PRIVATE_WRAP 0x00001009UL
+#define CKM_SKIPJACK_RELAYX 0x0000100aUL
+#define CKM_KEA_KEY_PAIR_GEN 0x00001010UL
+#define CKM_KEA_KEY_DERIVE 0x00001011UL
+#define CKM_KEA_DERIVE 0x00001012UL
+#define CKM_FORTEZZA_TIMESTAMP 0x00001020UL
+#define CKM_BATON_KEY_GEN 0x00001030UL
+#define CKM_BATON_ECB128 0x00001031UL
+#define CKM_BATON_ECB96 0x00001032UL
+#define CKM_BATON_CBC128 0x00001033UL
+#define CKM_BATON_COUNTER 0x00001034UL
+#define CKM_BATON_SHUFFLE 0x00001035UL
+#define CKM_BATON_WRAP 0x00001036UL
+
+#define CKM_ECDSA_KEY_PAIR_GEN 0x00001040UL /* Deprecated */
+#define CKM_EC_KEY_PAIR_GEN 0x00001040UL
+
+#define CKM_ECDSA 0x00001041UL
+#define CKM_ECDSA_SHA1 0x00001042UL
+#define CKM_ECDSA_SHA224 0x00001043UL
+#define CKM_ECDSA_SHA256 0x00001044UL
+#define CKM_ECDSA_SHA384 0x00001045UL
+#define CKM_ECDSA_SHA512 0x00001046UL
+
+#define CKM_ECDH1_DERIVE 0x00001050UL
+#define CKM_ECDH1_COFACTOR_DERIVE 0x00001051UL
+#define CKM_ECMQV_DERIVE 0x00001052UL
+
+#define CKM_ECDH_AES_KEY_WRAP 0x00001053UL
+#define CKM_RSA_AES_KEY_WRAP 0x00001054UL
+
+#define CKM_JUNIPER_KEY_GEN 0x00001060UL
+#define CKM_JUNIPER_ECB128 0x00001061UL
+#define CKM_JUNIPER_CBC128 0x00001062UL
+#define CKM_JUNIPER_COUNTER 0x00001063UL
+#define CKM_JUNIPER_SHUFFLE 0x00001064UL
+#define CKM_JUNIPER_WRAP 0x00001065UL
+#define CKM_FASTHASH 0x00001070UL
+
+#define CKM_AES_KEY_GEN 0x00001080UL
+#define CKM_AES_ECB 0x00001081UL
+#define CKM_AES_CBC 0x00001082UL
+#define CKM_AES_MAC 0x00001083UL
+#define CKM_AES_MAC_GENERAL 0x00001084UL
+#define CKM_AES_CBC_PAD 0x00001085UL
+#define CKM_AES_CTR 0x00001086UL
+#define CKM_AES_GCM 0x00001087UL
+#define CKM_AES_CCM 0x00001088UL
+#define CKM_AES_CTS 0x00001089UL
+#define CKM_AES_CMAC 0x0000108AUL
+#define CKM_AES_CMAC_GENERAL 0x0000108BUL
+
+#define CKM_AES_XCBC_MAC 0x0000108CUL
+#define CKM_AES_XCBC_MAC_96 0x0000108DUL
+#define CKM_AES_GMAC 0x0000108EUL
+
+#define CKM_BLOWFISH_KEY_GEN 0x00001090UL
+#define CKM_BLOWFISH_CBC 0x00001091UL
+#define CKM_TWOFISH_KEY_GEN 0x00001092UL
+#define CKM_TWOFISH_CBC 0x00001093UL
+#define CKM_BLOWFISH_CBC_PAD 0x00001094UL
+#define CKM_TWOFISH_CBC_PAD 0x00001095UL
+
+#define CKM_DES_ECB_ENCRYPT_DATA 0x00001100UL
+#define CKM_DES_CBC_ENCRYPT_DATA 0x00001101UL
+#define CKM_DES3_ECB_ENCRYPT_DATA 0x00001102UL
+#define CKM_DES3_CBC_ENCRYPT_DATA 0x00001103UL
+#define CKM_AES_ECB_ENCRYPT_DATA 0x00001104UL
+#define CKM_AES_CBC_ENCRYPT_DATA 0x00001105UL
+
+#define CKM_GOSTR3410_KEY_PAIR_GEN 0x00001200UL
+#define CKM_GOSTR3410 0x00001201UL
+#define CKM_GOSTR3410_WITH_GOSTR3411 0x00001202UL
+#define CKM_GOSTR3410_KEY_WRAP 0x00001203UL
+#define CKM_GOSTR3410_DERIVE 0x00001204UL
+#define CKM_GOSTR3411 0x00001210UL
+#define CKM_GOSTR3411_HMAC 0x00001211UL
+#define CKM_GOST28147_KEY_GEN 0x00001220UL
+#define CKM_GOST28147_ECB 0x00001221UL
+#define CKM_GOST28147 0x00001222UL
+#define CKM_GOST28147_MAC 0x00001223UL
+#define CKM_GOST28147_KEY_WRAP 0x00001224UL
+
+#define CKM_DSA_PARAMETER_GEN 0x00002000UL
+#define CKM_DH_PKCS_PARAMETER_GEN 0x00002001UL
+#define CKM_X9_42_DH_PARAMETER_GEN 0x00002002UL
+#define CKM_DSA_PROBABLISTIC_PARAMETER_GEN 0x00002003UL
+#define CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN 0x00002004UL
+
+#define CKM_AES_OFB 0x00002104UL
+#define CKM_AES_CFB64 0x00002105UL
+#define CKM_AES_CFB8 0x00002106UL
+#define CKM_AES_CFB128 0x00002107UL
+
+#define CKM_AES_CFB1 0x00002108UL
+#define CKM_AES_KEY_WRAP 0x00002109UL /* WAS: 0x00001090 */
+#define CKM_AES_KEY_WRAP_PAD 0x0000210AUL /* WAS: 0x00001091 */
+
+#define CKM_RSA_PKCS_TPM_1_1 0x00004001UL
+#define CKM_RSA_PKCS_OAEP_TPM_1_1 0x00004002UL
+
+#define CKM_VENDOR_DEFINED 0x80000000UL
+
+typedef CK_MECHANISM_TYPE CK_PTR CK_MECHANISM_TYPE_PTR;
+
+
+/* CK_MECHANISM is a structure that specifies a particular
+ * mechanism
+ */
+typedef struct CK_MECHANISM {
+ CK_MECHANISM_TYPE mechanism;
+ CK_VOID_PTR pParameter;
+ CK_ULONG ulParameterLen; /* in bytes */
+} CK_MECHANISM;
+
+typedef CK_MECHANISM CK_PTR CK_MECHANISM_PTR;
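+
+/* Illustrative sketch, not part of the upstream header: a CK_MECHANISM for
+ * CKM_AES_CBC_PAD carries the 16-byte IV as its parameter. The zeroed IV and
+ * the session/key handles are placeholders; real code uses a fresh random IV.
+ *
+ *   CK_BYTE iv[16] = {0};
+ *   CK_MECHANISM mechanism = {
+ *       CKM_AES_CBC_PAD,   // mechanism
+ *       iv,                // pParameter
+ *       sizeof(iv)         // ulParameterLen, in bytes
+ *   };
+ *   CK_RV rv = C_EncryptInit(session, &mechanism, key);
+ */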
+
+
+/* CK_MECHANISM_INFO provides information about a particular
+ * mechanism
+ */
+typedef struct CK_MECHANISM_INFO {
+ CK_ULONG ulMinKeySize;
+ CK_ULONG ulMaxKeySize;
+ CK_FLAGS flags;
+} CK_MECHANISM_INFO;
+
+/* The flags are defined as follows:
+ * Bit Flag Mask Meaning */
+#define CKF_HW 0x00000001UL /* performed by HW */
+
+/* Specify whether or not a mechanism can be used for a particular task */
+#define CKF_ENCRYPT 0x00000100UL
+#define CKF_DECRYPT 0x00000200UL
+#define CKF_DIGEST 0x00000400UL
+#define CKF_SIGN 0x00000800UL
+#define CKF_SIGN_RECOVER 0x00001000UL
+#define CKF_VERIFY 0x00002000UL
+#define CKF_VERIFY_RECOVER 0x00004000UL
+#define CKF_GENERATE 0x00008000UL
+#define CKF_GENERATE_KEY_PAIR 0x00010000UL
+#define CKF_WRAP 0x00020000UL
+#define CKF_UNWRAP 0x00040000UL
+#define CKF_DERIVE 0x00080000UL
+
+/* Describe a token's EC capabilities not available in mechanism
+ * information.
+ */
+#define CKF_EC_F_P 0x00100000UL
+#define CKF_EC_F_2M 0x00200000UL
+#define CKF_EC_ECPARAMETERS 0x00400000UL
+#define CKF_EC_NAMEDCURVE 0x00800000UL
+#define CKF_EC_UNCOMPRESS 0x01000000UL
+#define CKF_EC_COMPRESS 0x02000000UL
+
+#define CKF_EXTENSION 0x80000000UL
+
+typedef CK_MECHANISM_INFO CK_PTR CK_MECHANISM_INFO_PTR;
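+
+/* Illustrative sketch, not part of the upstream header: querying a mechanism's
+ * capability flags via the standard C_GetMechanismInfo entry point (slot_id is
+ * a placeholder, typically obtained from C_GetSlotList).
+ *
+ *   CK_MECHANISM_INFO info;
+ *   if (C_GetMechanismInfo(slot_id, CKM_RSA_PKCS_PSS, &info) == CKR_OK &&
+ *       (info.flags & CKF_SIGN) != 0) {
+ *       // the token can sign with this mechanism, for key sizes in
+ *       // [info.ulMinKeySize, info.ulMaxKeySize]
+ *   }
+ */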
+
+/* CK_RV is a value that identifies the return value of a
+ * Cryptoki function
+ */
+typedef CK_ULONG CK_RV;
+
+#define CKR_OK 0x00000000UL
+#define CKR_CANCEL 0x00000001UL
+#define CKR_HOST_MEMORY 0x00000002UL
+#define CKR_SLOT_ID_INVALID 0x00000003UL
+
+#define CKR_GENERAL_ERROR 0x00000005UL
+#define CKR_FUNCTION_FAILED 0x00000006UL
+
+#define CKR_ARGUMENTS_BAD 0x00000007UL
+#define CKR_NO_EVENT 0x00000008UL
+#define CKR_NEED_TO_CREATE_THREADS 0x00000009UL
+#define CKR_CANT_LOCK 0x0000000AUL
+
+#define CKR_ATTRIBUTE_READ_ONLY 0x00000010UL
+#define CKR_ATTRIBUTE_SENSITIVE 0x00000011UL
+#define CKR_ATTRIBUTE_TYPE_INVALID 0x00000012UL
+#define CKR_ATTRIBUTE_VALUE_INVALID 0x00000013UL
+
+#define CKR_ACTION_PROHIBITED 0x0000001BUL
+
+#define CKR_DATA_INVALID 0x00000020UL
+#define CKR_DATA_LEN_RANGE 0x00000021UL
+#define CKR_DEVICE_ERROR 0x00000030UL
+#define CKR_DEVICE_MEMORY 0x00000031UL
+#define CKR_DEVICE_REMOVED 0x00000032UL
+#define CKR_ENCRYPTED_DATA_INVALID 0x00000040UL
+#define CKR_ENCRYPTED_DATA_LEN_RANGE 0x00000041UL
+#define CKR_FUNCTION_CANCELED 0x00000050UL
+#define CKR_FUNCTION_NOT_PARALLEL 0x00000051UL
+
+#define CKR_FUNCTION_NOT_SUPPORTED 0x00000054UL
+
+#define CKR_KEY_HANDLE_INVALID 0x00000060UL
+
+#define CKR_KEY_SIZE_RANGE 0x00000062UL
+#define CKR_KEY_TYPE_INCONSISTENT 0x00000063UL
+
+#define CKR_KEY_NOT_NEEDED 0x00000064UL
+#define CKR_KEY_CHANGED 0x00000065UL
+#define CKR_KEY_NEEDED 0x00000066UL
+#define CKR_KEY_INDIGESTIBLE 0x00000067UL
+#define CKR_KEY_FUNCTION_NOT_PERMITTED 0x00000068UL
+#define CKR_KEY_NOT_WRAPPABLE 0x00000069UL
+#define CKR_KEY_UNEXTRACTABLE 0x0000006AUL
+
+#define CKR_MECHANISM_INVALID 0x00000070UL
+#define CKR_MECHANISM_PARAM_INVALID 0x00000071UL
+
+#define CKR_OBJECT_HANDLE_INVALID 0x00000082UL
+#define CKR_OPERATION_ACTIVE 0x00000090UL
+#define CKR_OPERATION_NOT_INITIALIZED 0x00000091UL
+#define CKR_PIN_INCORRECT 0x000000A0UL
+#define CKR_PIN_INVALID 0x000000A1UL
+#define CKR_PIN_LEN_RANGE 0x000000A2UL
+
+#define CKR_PIN_EXPIRED 0x000000A3UL
+#define CKR_PIN_LOCKED 0x000000A4UL
+
+#define CKR_SESSION_CLOSED 0x000000B0UL
+#define CKR_SESSION_COUNT 0x000000B1UL
+#define CKR_SESSION_HANDLE_INVALID 0x000000B3UL
+#define CKR_SESSION_PARALLEL_NOT_SUPPORTED 0x000000B4UL
+#define CKR_SESSION_READ_ONLY 0x000000B5UL
+#define CKR_SESSION_EXISTS 0x000000B6UL
+
+#define CKR_SESSION_READ_ONLY_EXISTS 0x000000B7UL
+#define CKR_SESSION_READ_WRITE_SO_EXISTS 0x000000B8UL
+
+#define CKR_SIGNATURE_INVALID 0x000000C0UL
+#define CKR_SIGNATURE_LEN_RANGE 0x000000C1UL
+#define CKR_TEMPLATE_INCOMPLETE 0x000000D0UL
+#define CKR_TEMPLATE_INCONSISTENT 0x000000D1UL
+#define CKR_TOKEN_NOT_PRESENT 0x000000E0UL
+#define CKR_TOKEN_NOT_RECOGNIZED 0x000000E1UL
+#define CKR_TOKEN_WRITE_PROTECTED 0x000000E2UL
+#define CKR_UNWRAPPING_KEY_HANDLE_INVALID 0x000000F0UL
+#define CKR_UNWRAPPING_KEY_SIZE_RANGE 0x000000F1UL
+#define CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT 0x000000F2UL
+#define CKR_USER_ALREADY_LOGGED_IN 0x00000100UL
+#define CKR_USER_NOT_LOGGED_IN 0x00000101UL
+#define CKR_USER_PIN_NOT_INITIALIZED 0x00000102UL
+#define CKR_USER_TYPE_INVALID 0x00000103UL
+
+#define CKR_USER_ANOTHER_ALREADY_LOGGED_IN 0x00000104UL
+#define CKR_USER_TOO_MANY_TYPES 0x00000105UL
+
+#define CKR_WRAPPED_KEY_INVALID 0x00000110UL
+#define CKR_WRAPPED_KEY_LEN_RANGE 0x00000112UL
+#define CKR_WRAPPING_KEY_HANDLE_INVALID 0x00000113UL
+#define CKR_WRAPPING_KEY_SIZE_RANGE 0x00000114UL
+#define CKR_WRAPPING_KEY_TYPE_INCONSISTENT 0x00000115UL
+#define CKR_RANDOM_SEED_NOT_SUPPORTED 0x00000120UL
+
+#define CKR_RANDOM_NO_RNG 0x00000121UL
+
+#define CKR_DOMAIN_PARAMS_INVALID 0x00000130UL
+
+#define CKR_CURVE_NOT_SUPPORTED 0x00000140UL
+
+#define CKR_BUFFER_TOO_SMALL 0x00000150UL
+#define CKR_SAVED_STATE_INVALID 0x00000160UL
+#define CKR_INFORMATION_SENSITIVE 0x00000170UL
+#define CKR_STATE_UNSAVEABLE 0x00000180UL
+
+#define CKR_CRYPTOKI_NOT_INITIALIZED 0x00000190UL
+#define CKR_CRYPTOKI_ALREADY_INITIALIZED 0x00000191UL
+#define CKR_MUTEX_BAD 0x000001A0UL
+#define CKR_MUTEX_NOT_LOCKED 0x000001A1UL
+
+#define CKR_NEW_PIN_MODE 0x000001B0UL
+#define CKR_NEXT_OTP 0x000001B1UL
+
+#define CKR_EXCEEDED_MAX_ITERATIONS 0x000001B5UL
+#define CKR_FIPS_SELF_TEST_FAILED 0x000001B6UL
+#define CKR_LIBRARY_LOAD_FAILED 0x000001B7UL
+#define CKR_PIN_TOO_WEAK 0x000001B8UL
+#define CKR_PUBLIC_KEY_INVALID 0x000001B9UL
+
+#define CKR_FUNCTION_REJECTED 0x00000200UL
+
+#define CKR_VENDOR_DEFINED 0x80000000UL
+
+
+/* CK_NOTIFY is an application callback that processes events */
+typedef CK_CALLBACK_FUNCTION(CK_RV, CK_NOTIFY)(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_NOTIFICATION event,
+ CK_VOID_PTR pApplication /* passed to C_OpenSession */
+);
+
+
+/* CK_FUNCTION_LIST is a structure holding a Cryptoki spec
+ * version and pointers of appropriate types to all the
+ * Cryptoki functions
+ */
+typedef struct CK_FUNCTION_LIST CK_FUNCTION_LIST;
+
+typedef CK_FUNCTION_LIST CK_PTR CK_FUNCTION_LIST_PTR;
+
+typedef CK_FUNCTION_LIST_PTR CK_PTR CK_FUNCTION_LIST_PTR_PTR;
+
+
+/* CK_CREATEMUTEX is an application callback for creating a
+ * mutex object
+ */
+typedef CK_CALLBACK_FUNCTION(CK_RV, CK_CREATEMUTEX)(
+ CK_VOID_PTR_PTR ppMutex /* location to receive ptr to mutex */
+);
+
+
+/* CK_DESTROYMUTEX is an application callback for destroying a
+ * mutex object
+ */
+typedef CK_CALLBACK_FUNCTION(CK_RV, CK_DESTROYMUTEX)(
+ CK_VOID_PTR pMutex /* pointer to mutex */
+);
+
+
+/* CK_LOCKMUTEX is an application callback for locking a mutex */
+typedef CK_CALLBACK_FUNCTION(CK_RV, CK_LOCKMUTEX)(
+ CK_VOID_PTR pMutex /* pointer to mutex */
+);
+
+
+/* CK_UNLOCKMUTEX is an application callback for unlocking a
+ * mutex
+ */
+typedef CK_CALLBACK_FUNCTION(CK_RV, CK_UNLOCKMUTEX)(
+ CK_VOID_PTR pMutex /* pointer to mutex */
+);
+
+
+/* CK_C_INITIALIZE_ARGS provides the optional arguments to
+ * C_Initialize
+ */
+typedef struct CK_C_INITIALIZE_ARGS {
+ CK_CREATEMUTEX CreateMutex;
+ CK_DESTROYMUTEX DestroyMutex;
+ CK_LOCKMUTEX LockMutex;
+ CK_UNLOCKMUTEX UnlockMutex;
+ CK_FLAGS flags;
+ CK_VOID_PTR pReserved;
+} CK_C_INITIALIZE_ARGS;
+
+/* flags: bit flags specifying options for C_Initialize
+ * Bit Flag Mask Meaning
+ */
+#define CKF_LIBRARY_CANT_CREATE_OS_THREADS 0x00000001UL
+#define CKF_OS_LOCKING_OK 0x00000002UL
+
+typedef CK_C_INITIALIZE_ARGS CK_PTR CK_C_INITIALIZE_ARGS_PTR;
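+
+/* Illustrative sketch, not part of the upstream header: the common "let the
+ * library use OS locking" initialization, with no custom mutex callbacks.
+ *
+ *   CK_C_INITIALIZE_ARGS init_args = {
+ *       NULL_PTR, NULL_PTR, NULL_PTR, NULL_PTR, // mutex callbacks unused
+ *       CKF_OS_LOCKING_OK,                      // flags
+ *       NULL_PTR                                // pReserved
+ *   };
+ *   CK_RV rv = C_Initialize(&init_args);
+ */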
+
+
+/* additional flags for parameters to functions */
+
+/* CKF_DONT_BLOCK is for the function C_WaitForSlotEvent */
+#define CKF_DONT_BLOCK 1
+
+/* CK_RSA_PKCS_MGF_TYPE is used to indicate the Mask
+ * Generation Function (MGF) applied to a message block when
+ * formatting a message block for the PKCS #1 OAEP encryption
+ * scheme.
+ */
+typedef CK_ULONG CK_RSA_PKCS_MGF_TYPE;
+
+typedef CK_RSA_PKCS_MGF_TYPE CK_PTR CK_RSA_PKCS_MGF_TYPE_PTR;
+
+/* The following MGFs are defined */
+#define CKG_MGF1_SHA1 0x00000001UL
+#define CKG_MGF1_SHA256 0x00000002UL
+#define CKG_MGF1_SHA384 0x00000003UL
+#define CKG_MGF1_SHA512 0x00000004UL
+#define CKG_MGF1_SHA224 0x00000005UL
+
+/* CK_RSA_PKCS_OAEP_SOURCE_TYPE is used to indicate the source
+ * of the encoding parameter when formatting a message block
+ * for the PKCS #1 OAEP encryption scheme.
+ */
+typedef CK_ULONG CK_RSA_PKCS_OAEP_SOURCE_TYPE;
+
+typedef CK_RSA_PKCS_OAEP_SOURCE_TYPE CK_PTR CK_RSA_PKCS_OAEP_SOURCE_TYPE_PTR;
+
+/* The following encoding parameter sources are defined */
+#define CKZ_DATA_SPECIFIED 0x00000001UL
+
+/* CK_RSA_PKCS_OAEP_PARAMS provides the parameters to the
+ * CKM_RSA_PKCS_OAEP mechanism.
+ */
+typedef struct CK_RSA_PKCS_OAEP_PARAMS {
+ CK_MECHANISM_TYPE hashAlg;
+ CK_RSA_PKCS_MGF_TYPE mgf;
+ CK_RSA_PKCS_OAEP_SOURCE_TYPE source;
+ CK_VOID_PTR pSourceData;
+ CK_ULONG ulSourceDataLen;
+} CK_RSA_PKCS_OAEP_PARAMS;
+
+typedef CK_RSA_PKCS_OAEP_PARAMS CK_PTR CK_RSA_PKCS_OAEP_PARAMS_PTR;
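+
+/* Illustrative sketch, not part of the upstream header: typical OAEP
+ * parameters pairing SHA-256 with MGF1-SHA256 and no label (source data).
+ *
+ *   CK_RSA_PKCS_OAEP_PARAMS oaep = {
+ *       CKM_SHA256,         // hashAlg
+ *       CKG_MGF1_SHA256,    // mgf
+ *       CKZ_DATA_SPECIFIED, // source
+ *       NULL_PTR,           // pSourceData: no label
+ *       0                   // ulSourceDataLen
+ *   };
+ *   CK_MECHANISM mech = {CKM_RSA_PKCS_OAEP, &oaep, sizeof(oaep)};
+ */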
+
+/* CK_RSA_PKCS_PSS_PARAMS provides the parameters to the
+ * CKM_RSA_PKCS_PSS mechanism(s).
+ */
+typedef struct CK_RSA_PKCS_PSS_PARAMS {
+ CK_MECHANISM_TYPE hashAlg;
+ CK_RSA_PKCS_MGF_TYPE mgf;
+ CK_ULONG sLen;
+} CK_RSA_PKCS_PSS_PARAMS;
+
+typedef CK_RSA_PKCS_PSS_PARAMS CK_PTR CK_RSA_PKCS_PSS_PARAMS_PTR;
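+
+/* Illustrative sketch, not part of the upstream header: PSS parameters for
+ * CKM_SHA256_RSA_PKCS_PSS; sLen is conventionally the hash length in bytes
+ * (32 for SHA-256), and hashAlg/mgf must match the mechanism's digest.
+ *
+ *   CK_RSA_PKCS_PSS_PARAMS pss = {CKM_SHA256, CKG_MGF1_SHA256, 32};
+ *   CK_MECHANISM mech = {CKM_SHA256_RSA_PKCS_PSS, &pss, sizeof(pss)};
+ */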
+
+typedef CK_ULONG CK_EC_KDF_TYPE;
+
+/* The following EC Key Derivation Functions are defined */
+#define CKD_NULL 0x00000001UL
+#define CKD_SHA1_KDF 0x00000002UL
+
+/* The following X9.42 DH key derivation functions are defined */
+#define CKD_SHA1_KDF_ASN1 0x00000003UL
+#define CKD_SHA1_KDF_CONCATENATE 0x00000004UL
+#define CKD_SHA224_KDF 0x00000005UL
+#define CKD_SHA256_KDF 0x00000006UL
+#define CKD_SHA384_KDF 0x00000007UL
+#define CKD_SHA512_KDF 0x00000008UL
+#define CKD_CPDIVERSIFY_KDF 0x00000009UL
+
+
+/* CK_ECDH1_DERIVE_PARAMS provides the parameters to the
+ * CKM_ECDH1_DERIVE and CKM_ECDH1_COFACTOR_DERIVE mechanisms,
+ * where each party contributes one key pair.
+ */
+typedef struct CK_ECDH1_DERIVE_PARAMS {
+ CK_EC_KDF_TYPE kdf;
+ CK_ULONG ulSharedDataLen;
+ CK_BYTE_PTR pSharedData;
+ CK_ULONG ulPublicDataLen;
+ CK_BYTE_PTR pPublicData;
+} CK_ECDH1_DERIVE_PARAMS;
+
+typedef CK_ECDH1_DERIVE_PARAMS CK_PTR CK_ECDH1_DERIVE_PARAMS_PTR;
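+
+/* Illustrative sketch, not part of the upstream header: raw ECDH (no KDF)
+ * against a peer's public key; peer_point/peer_point_len are placeholders for
+ * the other party's EC public key value (encoding rules vary by token).
+ *
+ *   CK_ECDH1_DERIVE_PARAMS ecdh = {
+ *       CKD_NULL,        // kdf: use the shared secret as-is
+ *       0, NULL_PTR,     // ulSharedDataLen, pSharedData: none with CKD_NULL
+ *       peer_point_len,  // ulPublicDataLen
+ *       peer_point       // pPublicData
+ *   };
+ *   CK_MECHANISM mech = {CKM_ECDH1_DERIVE, &ecdh, sizeof(ecdh)};
+ */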
+
+/*
+ * CK_ECDH2_DERIVE_PARAMS provides the parameters to the
+ * CKM_ECMQV_DERIVE mechanism, where each party contributes two key pairs.
+ */
+typedef struct CK_ECDH2_DERIVE_PARAMS {
+ CK_EC_KDF_TYPE kdf;
+ CK_ULONG ulSharedDataLen;
+ CK_BYTE_PTR pSharedData;
+ CK_ULONG ulPublicDataLen;
+ CK_BYTE_PTR pPublicData;
+ CK_ULONG ulPrivateDataLen;
+ CK_OBJECT_HANDLE hPrivateData;
+ CK_ULONG ulPublicDataLen2;
+ CK_BYTE_PTR pPublicData2;
+} CK_ECDH2_DERIVE_PARAMS;
+
+typedef CK_ECDH2_DERIVE_PARAMS CK_PTR CK_ECDH2_DERIVE_PARAMS_PTR;
+
+typedef struct CK_ECMQV_DERIVE_PARAMS {
+ CK_EC_KDF_TYPE kdf;
+ CK_ULONG ulSharedDataLen;
+ CK_BYTE_PTR pSharedData;
+ CK_ULONG ulPublicDataLen;
+ CK_BYTE_PTR pPublicData;
+ CK_ULONG ulPrivateDataLen;
+ CK_OBJECT_HANDLE hPrivateData;
+ CK_ULONG ulPublicDataLen2;
+ CK_BYTE_PTR pPublicData2;
+ CK_OBJECT_HANDLE publicKey;
+} CK_ECMQV_DERIVE_PARAMS;
+
+typedef CK_ECMQV_DERIVE_PARAMS CK_PTR CK_ECMQV_DERIVE_PARAMS_PTR;
+
+/* Typedefs and defines for the CKM_X9_42_DH_KEY_PAIR_GEN and the
+ * CKM_X9_42_DH_PARAMETER_GEN mechanisms
+ */
+typedef CK_ULONG CK_X9_42_DH_KDF_TYPE;
+typedef CK_X9_42_DH_KDF_TYPE CK_PTR CK_X9_42_DH_KDF_TYPE_PTR;
+
+/* CK_X9_42_DH1_DERIVE_PARAMS provides the parameters to the
+ * CKM_X9_42_DH_DERIVE key derivation mechanism, where each party
+ * contributes one key pair
+ */
+typedef struct CK_X9_42_DH1_DERIVE_PARAMS {
+ CK_X9_42_DH_KDF_TYPE kdf;
+ CK_ULONG ulOtherInfoLen;
+ CK_BYTE_PTR pOtherInfo;
+ CK_ULONG ulPublicDataLen;
+ CK_BYTE_PTR pPublicData;
+} CK_X9_42_DH1_DERIVE_PARAMS;
+
+typedef struct CK_X9_42_DH1_DERIVE_PARAMS CK_PTR CK_X9_42_DH1_DERIVE_PARAMS_PTR;
+
+/* CK_X9_42_DH2_DERIVE_PARAMS provides the parameters to the
+ * CKM_X9_42_DH_HYBRID_DERIVE and CKM_X9_42_MQV_DERIVE key derivation
+ * mechanisms, where each party contributes two key pairs
+ */
+typedef struct CK_X9_42_DH2_DERIVE_PARAMS {
+ CK_X9_42_DH_KDF_TYPE kdf;
+ CK_ULONG ulOtherInfoLen;
+ CK_BYTE_PTR pOtherInfo;
+ CK_ULONG ulPublicDataLen;
+ CK_BYTE_PTR pPublicData;
+ CK_ULONG ulPrivateDataLen;
+ CK_OBJECT_HANDLE hPrivateData;
+ CK_ULONG ulPublicDataLen2;
+ CK_BYTE_PTR pPublicData2;
+} CK_X9_42_DH2_DERIVE_PARAMS;
+
+typedef CK_X9_42_DH2_DERIVE_PARAMS CK_PTR CK_X9_42_DH2_DERIVE_PARAMS_PTR;
+
+typedef struct CK_X9_42_MQV_DERIVE_PARAMS {
+ CK_X9_42_DH_KDF_TYPE kdf;
+ CK_ULONG ulOtherInfoLen;
+ CK_BYTE_PTR pOtherInfo;
+ CK_ULONG ulPublicDataLen;
+ CK_BYTE_PTR pPublicData;
+ CK_ULONG ulPrivateDataLen;
+ CK_OBJECT_HANDLE hPrivateData;
+ CK_ULONG ulPublicDataLen2;
+ CK_BYTE_PTR pPublicData2;
+ CK_OBJECT_HANDLE publicKey;
+} CK_X9_42_MQV_DERIVE_PARAMS;
+
+typedef CK_X9_42_MQV_DERIVE_PARAMS CK_PTR CK_X9_42_MQV_DERIVE_PARAMS_PTR;
+
+/* CK_KEA_DERIVE_PARAMS provides the parameters to the
+ * CKM_KEA_DERIVE mechanism
+ */
+typedef struct CK_KEA_DERIVE_PARAMS {
+ CK_BBOOL isSender;
+ CK_ULONG ulRandomLen;
+ CK_BYTE_PTR pRandomA;
+ CK_BYTE_PTR pRandomB;
+ CK_ULONG ulPublicDataLen;
+ CK_BYTE_PTR pPublicData;
+} CK_KEA_DERIVE_PARAMS;
+
+typedef CK_KEA_DERIVE_PARAMS CK_PTR CK_KEA_DERIVE_PARAMS_PTR;
+
+
+/* CK_RC2_PARAMS provides the parameters to the CKM_RC2_ECB and
+ * CKM_RC2_MAC mechanisms. An instance of CK_RC2_PARAMS just
+ * holds the effective keysize
+ */
+typedef CK_ULONG CK_RC2_PARAMS;
+
+typedef CK_RC2_PARAMS CK_PTR CK_RC2_PARAMS_PTR;
+
+
+/* CK_RC2_CBC_PARAMS provides the parameters to the CKM_RC2_CBC
+ * mechanism
+ */
+typedef struct CK_RC2_CBC_PARAMS {
+ CK_ULONG ulEffectiveBits; /* effective bits (1-1024) */
+ CK_BYTE iv[8]; /* IV for CBC mode */
+} CK_RC2_CBC_PARAMS;
+
+typedef CK_RC2_CBC_PARAMS CK_PTR CK_RC2_CBC_PARAMS_PTR;
+
+
+/* CK_RC2_MAC_GENERAL_PARAMS provides the parameters for the
+ * CKM_RC2_MAC_GENERAL mechanism
+ */
+typedef struct CK_RC2_MAC_GENERAL_PARAMS {
+ CK_ULONG ulEffectiveBits; /* effective bits (1-1024) */
+ CK_ULONG ulMacLength; /* Length of MAC in bytes */
+} CK_RC2_MAC_GENERAL_PARAMS;
+
+typedef CK_RC2_MAC_GENERAL_PARAMS CK_PTR \
+ CK_RC2_MAC_GENERAL_PARAMS_PTR;
+
+
+/* CK_RC5_PARAMS provides the parameters to the CKM_RC5_ECB and
+ * CKM_RC5_MAC mechanisms
+ */
+typedef struct CK_RC5_PARAMS {
+ CK_ULONG ulWordsize; /* wordsize in bits */
+ CK_ULONG ulRounds; /* number of rounds */
+} CK_RC5_PARAMS;
+
+typedef CK_RC5_PARAMS CK_PTR CK_RC5_PARAMS_PTR;
+
+
+/* CK_RC5_CBC_PARAMS provides the parameters to the CKM_RC5_CBC
+ * mechanism
+ */
+typedef struct CK_RC5_CBC_PARAMS {
+ CK_ULONG ulWordsize; /* wordsize in bits */
+ CK_ULONG ulRounds; /* number of rounds */
+ CK_BYTE_PTR pIv; /* pointer to IV */
+ CK_ULONG ulIvLen; /* length of IV in bytes */
+} CK_RC5_CBC_PARAMS;
+
+typedef CK_RC5_CBC_PARAMS CK_PTR CK_RC5_CBC_PARAMS_PTR;
+
+
+/* CK_RC5_MAC_GENERAL_PARAMS provides the parameters for the
+ * CKM_RC5_MAC_GENERAL mechanism
+ */
+typedef struct CK_RC5_MAC_GENERAL_PARAMS {
+ CK_ULONG ulWordsize; /* wordsize in bits */
+ CK_ULONG ulRounds; /* number of rounds */
+ CK_ULONG ulMacLength; /* Length of MAC in bytes */
+} CK_RC5_MAC_GENERAL_PARAMS;
+
+typedef CK_RC5_MAC_GENERAL_PARAMS CK_PTR \
+ CK_RC5_MAC_GENERAL_PARAMS_PTR;
+
+/* CK_MAC_GENERAL_PARAMS provides the parameters to most block
+ * ciphers' MAC_GENERAL mechanisms. Its value is the length of
+ * the MAC
+ */
+typedef CK_ULONG CK_MAC_GENERAL_PARAMS;
+
+typedef CK_MAC_GENERAL_PARAMS CK_PTR CK_MAC_GENERAL_PARAMS_PTR;
+
+typedef struct CK_DES_CBC_ENCRYPT_DATA_PARAMS {
+ CK_BYTE iv[8];
+ CK_BYTE_PTR pData;
+ CK_ULONG length;
+} CK_DES_CBC_ENCRYPT_DATA_PARAMS;
+
+typedef CK_DES_CBC_ENCRYPT_DATA_PARAMS CK_PTR CK_DES_CBC_ENCRYPT_DATA_PARAMS_PTR;
+
+typedef struct CK_AES_CBC_ENCRYPT_DATA_PARAMS {
+ CK_BYTE iv[16];
+ CK_BYTE_PTR pData;
+ CK_ULONG length;
+} CK_AES_CBC_ENCRYPT_DATA_PARAMS;
+
+typedef CK_AES_CBC_ENCRYPT_DATA_PARAMS CK_PTR CK_AES_CBC_ENCRYPT_DATA_PARAMS_PTR;
+
+/* CK_SKIPJACK_PRIVATE_WRAP_PARAMS provides the parameters to the
+ * CKM_SKIPJACK_PRIVATE_WRAP mechanism
+ */
+typedef struct CK_SKIPJACK_PRIVATE_WRAP_PARAMS {
+ CK_ULONG ulPasswordLen;
+ CK_BYTE_PTR pPassword;
+ CK_ULONG ulPublicDataLen;
+ CK_BYTE_PTR pPublicData;
+ CK_ULONG ulPAndGLen;
+ CK_ULONG ulQLen;
+ CK_ULONG ulRandomLen;
+ CK_BYTE_PTR pRandomA;
+ CK_BYTE_PTR pPrimeP;
+ CK_BYTE_PTR pBaseG;
+ CK_BYTE_PTR pSubprimeQ;
+} CK_SKIPJACK_PRIVATE_WRAP_PARAMS;
+
+typedef CK_SKIPJACK_PRIVATE_WRAP_PARAMS CK_PTR \
+ CK_SKIPJACK_PRIVATE_WRAP_PARAMS_PTR;
+
+
+/* CK_SKIPJACK_RELAYX_PARAMS provides the parameters to the
+ * CKM_SKIPJACK_RELAYX mechanism
+ */
+typedef struct CK_SKIPJACK_RELAYX_PARAMS {
+ CK_ULONG ulOldWrappedXLen;
+ CK_BYTE_PTR pOldWrappedX;
+ CK_ULONG ulOldPasswordLen;
+ CK_BYTE_PTR pOldPassword;
+ CK_ULONG ulOldPublicDataLen;
+ CK_BYTE_PTR pOldPublicData;
+ CK_ULONG ulOldRandomLen;
+ CK_BYTE_PTR pOldRandomA;
+ CK_ULONG ulNewPasswordLen;
+ CK_BYTE_PTR pNewPassword;
+ CK_ULONG ulNewPublicDataLen;
+ CK_BYTE_PTR pNewPublicData;
+ CK_ULONG ulNewRandomLen;
+ CK_BYTE_PTR pNewRandomA;
+} CK_SKIPJACK_RELAYX_PARAMS;
+
+typedef CK_SKIPJACK_RELAYX_PARAMS CK_PTR \
+ CK_SKIPJACK_RELAYX_PARAMS_PTR;
+
+
+typedef struct CK_PBE_PARAMS {
+ CK_BYTE_PTR pInitVector;
+ CK_UTF8CHAR_PTR pPassword;
+ CK_ULONG ulPasswordLen;
+ CK_BYTE_PTR pSalt;
+ CK_ULONG ulSaltLen;
+ CK_ULONG ulIteration;
+} CK_PBE_PARAMS;
+
+typedef CK_PBE_PARAMS CK_PTR CK_PBE_PARAMS_PTR;
+
+
+/* CK_KEY_WRAP_SET_OAEP_PARAMS provides the parameters to the
+ * CKM_KEY_WRAP_SET_OAEP mechanism
+ */
+typedef struct CK_KEY_WRAP_SET_OAEP_PARAMS {
+ CK_BYTE bBC; /* block contents byte */
+ CK_BYTE_PTR pX; /* extra data */
+ CK_ULONG ulXLen; /* length of extra data in bytes */
+} CK_KEY_WRAP_SET_OAEP_PARAMS;
+
+typedef CK_KEY_WRAP_SET_OAEP_PARAMS CK_PTR CK_KEY_WRAP_SET_OAEP_PARAMS_PTR;
+
+typedef struct CK_SSL3_RANDOM_DATA {
+ CK_BYTE_PTR pClientRandom;
+ CK_ULONG ulClientRandomLen;
+ CK_BYTE_PTR pServerRandom;
+ CK_ULONG ulServerRandomLen;
+} CK_SSL3_RANDOM_DATA;
+
+
+typedef struct CK_SSL3_MASTER_KEY_DERIVE_PARAMS {
+ CK_SSL3_RANDOM_DATA RandomInfo;
+ CK_VERSION_PTR pVersion;
+} CK_SSL3_MASTER_KEY_DERIVE_PARAMS;
+
+typedef struct CK_SSL3_MASTER_KEY_DERIVE_PARAMS CK_PTR \
+ CK_SSL3_MASTER_KEY_DERIVE_PARAMS_PTR;
+
+typedef struct CK_SSL3_KEY_MAT_OUT {
+ CK_OBJECT_HANDLE hClientMacSecret;
+ CK_OBJECT_HANDLE hServerMacSecret;
+ CK_OBJECT_HANDLE hClientKey;
+ CK_OBJECT_HANDLE hServerKey;
+ CK_BYTE_PTR pIVClient;
+ CK_BYTE_PTR pIVServer;
+} CK_SSL3_KEY_MAT_OUT;
+
+typedef CK_SSL3_KEY_MAT_OUT CK_PTR CK_SSL3_KEY_MAT_OUT_PTR;
+
+
+typedef struct CK_SSL3_KEY_MAT_PARAMS {
+ CK_ULONG ulMacSizeInBits;
+ CK_ULONG ulKeySizeInBits;
+ CK_ULONG ulIVSizeInBits;
+ CK_BBOOL bIsExport;
+ CK_SSL3_RANDOM_DATA RandomInfo;
+ CK_SSL3_KEY_MAT_OUT_PTR pReturnedKeyMaterial;
+} CK_SSL3_KEY_MAT_PARAMS;
+
+typedef CK_SSL3_KEY_MAT_PARAMS CK_PTR CK_SSL3_KEY_MAT_PARAMS_PTR;
+
+typedef struct CK_TLS_PRF_PARAMS {
+ CK_BYTE_PTR pSeed;
+ CK_ULONG ulSeedLen;
+ CK_BYTE_PTR pLabel;
+ CK_ULONG ulLabelLen;
+ CK_BYTE_PTR pOutput;
+ CK_ULONG_PTR pulOutputLen;
+} CK_TLS_PRF_PARAMS;
+
+typedef CK_TLS_PRF_PARAMS CK_PTR CK_TLS_PRF_PARAMS_PTR;
+
+typedef struct CK_WTLS_RANDOM_DATA {
+ CK_BYTE_PTR pClientRandom;
+ CK_ULONG ulClientRandomLen;
+ CK_BYTE_PTR pServerRandom;
+ CK_ULONG ulServerRandomLen;
+} CK_WTLS_RANDOM_DATA;
+
+typedef CK_WTLS_RANDOM_DATA CK_PTR CK_WTLS_RANDOM_DATA_PTR;
+
+typedef struct CK_WTLS_MASTER_KEY_DERIVE_PARAMS {
+ CK_MECHANISM_TYPE DigestMechanism;
+ CK_WTLS_RANDOM_DATA RandomInfo;
+ CK_BYTE_PTR pVersion;
+} CK_WTLS_MASTER_KEY_DERIVE_PARAMS;
+
+typedef CK_WTLS_MASTER_KEY_DERIVE_PARAMS CK_PTR \
+ CK_WTLS_MASTER_KEY_DERIVE_PARAMS_PTR;
+
+typedef struct CK_WTLS_PRF_PARAMS {
+ CK_MECHANISM_TYPE DigestMechanism;
+ CK_BYTE_PTR pSeed;
+ CK_ULONG ulSeedLen;
+ CK_BYTE_PTR pLabel;
+ CK_ULONG ulLabelLen;
+ CK_BYTE_PTR pOutput;
+ CK_ULONG_PTR pulOutputLen;
+} CK_WTLS_PRF_PARAMS;
+
+typedef CK_WTLS_PRF_PARAMS CK_PTR CK_WTLS_PRF_PARAMS_PTR;
+
+typedef struct CK_WTLS_KEY_MAT_OUT {
+ CK_OBJECT_HANDLE hMacSecret;
+ CK_OBJECT_HANDLE hKey;
+ CK_BYTE_PTR pIV;
+} CK_WTLS_KEY_MAT_OUT;
+
+typedef CK_WTLS_KEY_MAT_OUT CK_PTR CK_WTLS_KEY_MAT_OUT_PTR;
+
+typedef struct CK_WTLS_KEY_MAT_PARAMS {
+ CK_MECHANISM_TYPE DigestMechanism;
+ CK_ULONG ulMacSizeInBits;
+ CK_ULONG ulKeySizeInBits;
+ CK_ULONG ulIVSizeInBits;
+ CK_ULONG ulSequenceNumber;
+ CK_BBOOL bIsExport;
+ CK_WTLS_RANDOM_DATA RandomInfo;
+ CK_WTLS_KEY_MAT_OUT_PTR pReturnedKeyMaterial;
+} CK_WTLS_KEY_MAT_PARAMS;
+
+typedef CK_WTLS_KEY_MAT_PARAMS CK_PTR CK_WTLS_KEY_MAT_PARAMS_PTR;
+
+typedef struct CK_CMS_SIG_PARAMS {
+ CK_OBJECT_HANDLE certificateHandle;
+ CK_MECHANISM_PTR pSigningMechanism;
+ CK_MECHANISM_PTR pDigestMechanism;
+ CK_UTF8CHAR_PTR pContentType;
+ CK_BYTE_PTR pRequestedAttributes;
+ CK_ULONG ulRequestedAttributesLen;
+ CK_BYTE_PTR pRequiredAttributes;
+ CK_ULONG ulRequiredAttributesLen;
+} CK_CMS_SIG_PARAMS;
+
+typedef CK_CMS_SIG_PARAMS CK_PTR CK_CMS_SIG_PARAMS_PTR;
+
+typedef struct CK_KEY_DERIVATION_STRING_DATA {
+ CK_BYTE_PTR pData;
+ CK_ULONG ulLen;
+} CK_KEY_DERIVATION_STRING_DATA;
+
+typedef CK_KEY_DERIVATION_STRING_DATA CK_PTR \
+ CK_KEY_DERIVATION_STRING_DATA_PTR;
+
+
+/* The CK_EXTRACT_PARAMS is used for the
+ * CKM_EXTRACT_KEY_FROM_KEY mechanism. It specifies which bit
+ * of the base key should be used as the first bit of the
+ * derived key
+ */
+typedef CK_ULONG CK_EXTRACT_PARAMS;
+
+typedef CK_EXTRACT_PARAMS CK_PTR CK_EXTRACT_PARAMS_PTR;
+
+/* CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE is used to
+ * indicate the Pseudo-Random Function (PRF) used to generate
+ * key bits using PKCS #5 PBKDF2.
+ */
+typedef CK_ULONG CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE;
+
+typedef CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE CK_PTR \
+ CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE_PTR;
+
+#define CKP_PKCS5_PBKD2_HMAC_SHA1 0x00000001UL
+#define CKP_PKCS5_PBKD2_HMAC_GOSTR3411 0x00000002UL
+#define CKP_PKCS5_PBKD2_HMAC_SHA224 0x00000003UL
+#define CKP_PKCS5_PBKD2_HMAC_SHA256 0x00000004UL
+#define CKP_PKCS5_PBKD2_HMAC_SHA384 0x00000005UL
+#define CKP_PKCS5_PBKD2_HMAC_SHA512 0x00000006UL
+#define CKP_PKCS5_PBKD2_HMAC_SHA512_224 0x00000007UL
+#define CKP_PKCS5_PBKD2_HMAC_SHA512_256 0x00000008UL
+
+/* CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE is used to indicate the
+ * source of the salt value when deriving a key using PKCS #5
+ * PBKDF2.
+ */
+typedef CK_ULONG CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE;
+
+typedef CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE CK_PTR \
+ CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE_PTR;
+
+/* The following salt value sources are defined in PKCS #5 v2.0. */
+#define CKZ_SALT_SPECIFIED 0x00000001UL
+
+/* CK_PKCS5_PBKD2_PARAMS is a structure that provides the
+ * parameters to the CKM_PKCS5_PBKD2 mechanism.
+ */
+typedef struct CK_PKCS5_PBKD2_PARAMS {
+ CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource;
+ CK_VOID_PTR pSaltSourceData;
+ CK_ULONG ulSaltSourceDataLen;
+ CK_ULONG iterations;
+ CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf;
+ CK_VOID_PTR pPrfData;
+ CK_ULONG ulPrfDataLen;
+ CK_UTF8CHAR_PTR pPassword;
+ CK_ULONG_PTR ulPasswordLen;
+} CK_PKCS5_PBKD2_PARAMS;
+
+typedef CK_PKCS5_PBKD2_PARAMS CK_PTR CK_PKCS5_PBKD2_PARAMS_PTR;
+
+/* CK_PKCS5_PBKD2_PARAMS2 is a corrected version of the CK_PKCS5_PBKD2_PARAMS
+ * structure that provides the parameters to the CKM_PKCS5_PBKD2 mechanism
+ * noting that the ulPasswordLen field is a CK_ULONG and not a CK_ULONG_PTR.
+ */
+typedef struct CK_PKCS5_PBKD2_PARAMS2 {
+ CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource;
+ CK_VOID_PTR pSaltSourceData;
+ CK_ULONG ulSaltSourceDataLen;
+ CK_ULONG iterations;
+ CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf;
+ CK_VOID_PTR pPrfData;
+ CK_ULONG ulPrfDataLen;
+ CK_UTF8CHAR_PTR pPassword;
+ CK_ULONG ulPasswordLen;
+} CK_PKCS5_PBKD2_PARAMS2;
+
+typedef CK_PKCS5_PBKD2_PARAMS2 CK_PTR CK_PKCS5_PBKD2_PARAMS2_PTR;
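+
+/* Illustrative sketch, not part of the upstream header: deriving a key with
+ * CKM_PKCS5_PBKD2 using the corrected PARAMS2 layout (ulPasswordLen is a
+ * plain CK_ULONG). The salt, iteration count, and password are placeholders.
+ *
+ *   CK_BYTE salt[16] = {0};
+ *   CK_PKCS5_PBKD2_PARAMS2 pbkdf2 = {
+ *       CKZ_SALT_SPECIFIED,          // saltSource
+ *       salt, sizeof(salt),          // pSaltSourceData, ulSaltSourceDataLen
+ *       100000,                      // iterations
+ *       CKP_PKCS5_PBKD2_HMAC_SHA256, // prf
+ *       NULL_PTR, 0,                 // pPrfData, ulPrfDataLen
+ *       (CK_UTF8CHAR_PTR)"password", // pPassword
+ *       8                            // ulPasswordLen
+ *   };
+ *   CK_MECHANISM mech = {CKM_PKCS5_PBKD2, &pbkdf2, sizeof(pbkdf2)};
+ */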
+
+typedef CK_ULONG CK_OTP_PARAM_TYPE;
+typedef CK_OTP_PARAM_TYPE CK_PARAM_TYPE; /* backward compatibility */
+
+typedef struct CK_OTP_PARAM {
+ CK_OTP_PARAM_TYPE type;
+ CK_VOID_PTR pValue;
+ CK_ULONG ulValueLen;
+} CK_OTP_PARAM;
+
+typedef CK_OTP_PARAM CK_PTR CK_OTP_PARAM_PTR;
+
+typedef struct CK_OTP_PARAMS {
+ CK_OTP_PARAM_PTR pParams;
+ CK_ULONG ulCount;
+} CK_OTP_PARAMS;
+
+typedef CK_OTP_PARAMS CK_PTR CK_OTP_PARAMS_PTR;
+
+typedef struct CK_OTP_SIGNATURE_INFO {
+ CK_OTP_PARAM_PTR pParams;
+ CK_ULONG ulCount;
+} CK_OTP_SIGNATURE_INFO;
+
+typedef CK_OTP_SIGNATURE_INFO CK_PTR CK_OTP_SIGNATURE_INFO_PTR;
+
+#define CK_OTP_VALUE 0UL
+#define CK_OTP_PIN 1UL
+#define CK_OTP_CHALLENGE 2UL
+#define CK_OTP_TIME 3UL
+#define CK_OTP_COUNTER 4UL
+#define CK_OTP_FLAGS 5UL
+#define CK_OTP_OUTPUT_LENGTH 6UL
+#define CK_OTP_OUTPUT_FORMAT 7UL
+
+#define CKF_NEXT_OTP 0x00000001UL
+#define CKF_EXCLUDE_TIME 0x00000002UL
+#define CKF_EXCLUDE_COUNTER 0x00000004UL
+#define CKF_EXCLUDE_CHALLENGE 0x00000008UL
+#define CKF_EXCLUDE_PIN 0x00000010UL
+#define CKF_USER_FRIENDLY_OTP 0x00000020UL
+
+typedef struct CK_KIP_PARAMS {
+ CK_MECHANISM_PTR pMechanism;
+ CK_OBJECT_HANDLE hKey;
+ CK_BYTE_PTR pSeed;
+ CK_ULONG ulSeedLen;
+} CK_KIP_PARAMS;
+
+typedef CK_KIP_PARAMS CK_PTR CK_KIP_PARAMS_PTR;
+
+typedef struct CK_AES_CTR_PARAMS {
+ CK_ULONG ulCounterBits;
+ CK_BYTE cb[16];
+} CK_AES_CTR_PARAMS;
+
+typedef CK_AES_CTR_PARAMS CK_PTR CK_AES_CTR_PARAMS_PTR;
+
+typedef struct CK_GCM_PARAMS {
+ CK_BYTE_PTR pIv;
+ CK_ULONG ulIvLen;
+ CK_ULONG ulIvBits;
+ CK_BYTE_PTR pAAD;
+ CK_ULONG ulAADLen;
+ CK_ULONG ulTagBits;
+} CK_GCM_PARAMS;
+
+typedef CK_GCM_PARAMS CK_PTR CK_GCM_PARAMS_PTR;
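+
+/* Illustrative sketch, not part of the upstream header: AES-GCM parameters
+ * with the common 12-byte IV and 128-bit tag. The zeroed IV is a placeholder
+ * (each message needs a unique IV) and ulIvBits simply mirrors ulIvLen here.
+ *
+ *   CK_BYTE gcm_iv[12] = {0};
+ *   CK_GCM_PARAMS gcm = {
+ *       gcm_iv, sizeof(gcm_iv),  // pIv, ulIvLen
+ *       sizeof(gcm_iv) * 8,      // ulIvBits
+ *       NULL_PTR, 0,             // pAAD, ulAADLen: no additional data
+ *       128                      // ulTagBits
+ *   };
+ *   CK_MECHANISM mech = {CKM_AES_GCM, &gcm, sizeof(gcm)};
+ */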
+
+typedef struct CK_CCM_PARAMS {
+ CK_ULONG ulDataLen;
+ CK_BYTE_PTR pNonce;
+ CK_ULONG ulNonceLen;
+ CK_BYTE_PTR pAAD;
+ CK_ULONG ulAADLen;
+ CK_ULONG ulMACLen;
+} CK_CCM_PARAMS;
+
+typedef CK_CCM_PARAMS CK_PTR CK_CCM_PARAMS_PTR;
+
+/* Deprecated. Use CK_GCM_PARAMS */
+typedef struct CK_AES_GCM_PARAMS {
+ CK_BYTE_PTR pIv;
+ CK_ULONG ulIvLen;
+ CK_ULONG ulIvBits;
+ CK_BYTE_PTR pAAD;
+ CK_ULONG ulAADLen;
+ CK_ULONG ulTagBits;
+} CK_AES_GCM_PARAMS;
+
+typedef CK_AES_GCM_PARAMS CK_PTR CK_AES_GCM_PARAMS_PTR;
+
+/* Deprecated. Use CK_CCM_PARAMS */
+typedef struct CK_AES_CCM_PARAMS {
+ CK_ULONG ulDataLen;
+ CK_BYTE_PTR pNonce;
+ CK_ULONG ulNonceLen;
+ CK_BYTE_PTR pAAD;
+ CK_ULONG ulAADLen;
+ CK_ULONG ulMACLen;
+} CK_AES_CCM_PARAMS;
+
+typedef CK_AES_CCM_PARAMS CK_PTR CK_AES_CCM_PARAMS_PTR;
+
+typedef struct CK_CAMELLIA_CTR_PARAMS {
+ CK_ULONG ulCounterBits;
+ CK_BYTE cb[16];
+} CK_CAMELLIA_CTR_PARAMS;
+
+typedef CK_CAMELLIA_CTR_PARAMS CK_PTR CK_CAMELLIA_CTR_PARAMS_PTR;
+
+typedef struct CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS {
+ CK_BYTE iv[16];
+ CK_BYTE_PTR pData;
+ CK_ULONG length;
+} CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS;
+
+typedef CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS CK_PTR \
+ CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS_PTR;
+
+typedef struct CK_ARIA_CBC_ENCRYPT_DATA_PARAMS {
+ CK_BYTE iv[16];
+ CK_BYTE_PTR pData;
+ CK_ULONG length;
+} CK_ARIA_CBC_ENCRYPT_DATA_PARAMS;
+
+typedef CK_ARIA_CBC_ENCRYPT_DATA_PARAMS CK_PTR \
+ CK_ARIA_CBC_ENCRYPT_DATA_PARAMS_PTR;
+
+typedef struct CK_DSA_PARAMETER_GEN_PARAM {
+ CK_MECHANISM_TYPE hash;
+ CK_BYTE_PTR pSeed;
+ CK_ULONG ulSeedLen;
+ CK_ULONG ulIndex;
+} CK_DSA_PARAMETER_GEN_PARAM;
+
+typedef CK_DSA_PARAMETER_GEN_PARAM CK_PTR CK_DSA_PARAMETER_GEN_PARAM_PTR;
+
+typedef struct CK_ECDH_AES_KEY_WRAP_PARAMS {
+ CK_ULONG ulAESKeyBits;
+ CK_EC_KDF_TYPE kdf;
+ CK_ULONG ulSharedDataLen;
+ CK_BYTE_PTR pSharedData;
+} CK_ECDH_AES_KEY_WRAP_PARAMS;
+
+typedef CK_ECDH_AES_KEY_WRAP_PARAMS CK_PTR CK_ECDH_AES_KEY_WRAP_PARAMS_PTR;
+
+typedef CK_ULONG CK_JAVA_MIDP_SECURITY_DOMAIN;
+
+typedef CK_ULONG CK_CERTIFICATE_CATEGORY;
+
+typedef struct CK_RSA_AES_KEY_WRAP_PARAMS {
+ CK_ULONG ulAESKeyBits;
+ CK_RSA_PKCS_OAEP_PARAMS_PTR pOAEPParams;
+} CK_RSA_AES_KEY_WRAP_PARAMS;
+
+typedef CK_RSA_AES_KEY_WRAP_PARAMS CK_PTR CK_RSA_AES_KEY_WRAP_PARAMS_PTR;
+
+typedef struct CK_TLS12_MASTER_KEY_DERIVE_PARAMS {
+ CK_SSL3_RANDOM_DATA RandomInfo;
+ CK_VERSION_PTR pVersion;
+ CK_MECHANISM_TYPE prfHashMechanism;
+} CK_TLS12_MASTER_KEY_DERIVE_PARAMS;
+
+typedef CK_TLS12_MASTER_KEY_DERIVE_PARAMS CK_PTR \
+ CK_TLS12_MASTER_KEY_DERIVE_PARAMS_PTR;
+
+typedef struct CK_TLS12_KEY_MAT_PARAMS {
+ CK_ULONG ulMacSizeInBits;
+ CK_ULONG ulKeySizeInBits;
+ CK_ULONG ulIVSizeInBits;
+ CK_BBOOL bIsExport;
+ CK_SSL3_RANDOM_DATA RandomInfo;
+ CK_SSL3_KEY_MAT_OUT_PTR pReturnedKeyMaterial;
+ CK_MECHANISM_TYPE prfHashMechanism;
+} CK_TLS12_KEY_MAT_PARAMS;
+
+typedef CK_TLS12_KEY_MAT_PARAMS CK_PTR CK_TLS12_KEY_MAT_PARAMS_PTR;
+
+typedef struct CK_TLS_KDF_PARAMS {
+ CK_MECHANISM_TYPE prfMechanism;
+ CK_BYTE_PTR pLabel;
+ CK_ULONG ulLabelLength;
+ CK_SSL3_RANDOM_DATA RandomInfo;
+ CK_BYTE_PTR pContextData;
+ CK_ULONG ulContextDataLength;
+} CK_TLS_KDF_PARAMS;
+
+typedef CK_TLS_KDF_PARAMS CK_PTR CK_TLS_KDF_PARAMS_PTR;
+
+typedef struct CK_TLS_MAC_PARAMS {
+ CK_MECHANISM_TYPE prfHashMechanism;
+ CK_ULONG ulMacLength;
+ CK_ULONG ulServerOrClient;
+} CK_TLS_MAC_PARAMS;
+
+typedef CK_TLS_MAC_PARAMS CK_PTR CK_TLS_MAC_PARAMS_PTR;
+
+typedef struct CK_GOSTR3410_DERIVE_PARAMS {
+ CK_EC_KDF_TYPE kdf;
+ CK_BYTE_PTR pPublicData;
+ CK_ULONG ulPublicDataLen;
+ CK_BYTE_PTR pUKM;
+ CK_ULONG ulUKMLen;
+} CK_GOSTR3410_DERIVE_PARAMS;
+
+typedef CK_GOSTR3410_DERIVE_PARAMS CK_PTR CK_GOSTR3410_DERIVE_PARAMS_PTR;
+
+typedef struct CK_GOSTR3410_KEY_WRAP_PARAMS {
+ CK_BYTE_PTR pWrapOID;
+ CK_ULONG ulWrapOIDLen;
+ CK_BYTE_PTR pUKM;
+ CK_ULONG ulUKMLen;
+ CK_OBJECT_HANDLE hKey;
+} CK_GOSTR3410_KEY_WRAP_PARAMS;
+
+typedef CK_GOSTR3410_KEY_WRAP_PARAMS CK_PTR CK_GOSTR3410_KEY_WRAP_PARAMS_PTR;
+
+typedef struct CK_SEED_CBC_ENCRYPT_DATA_PARAMS {
+ CK_BYTE iv[16];
+ CK_BYTE_PTR pData;
+ CK_ULONG length;
+} CK_SEED_CBC_ENCRYPT_DATA_PARAMS;
+
+typedef CK_SEED_CBC_ENCRYPT_DATA_PARAMS CK_PTR \
+ CK_SEED_CBC_ENCRYPT_DATA_PARAMS_PTR;
+
+#endif /* _PKCS11T_H_ */
+
diff --git a/contrib/restricted/aws/aws-c-io/source/pkcs11_private.h b/contrib/restricted/aws/aws-c-io/source/pkcs11_private.h
new file mode 100644
index 0000000000..8eeca4604f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-io/source/pkcs11_private.h
@@ -0,0 +1,167 @@
+#ifndef AWS_IO_PKCS11_PRIVATE_H
+#define AWS_IO_PKCS11_PRIVATE_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/io/io.h>
+
+/* These defines must exist before the official PKCS#11 headers are included */
+#define CK_PTR *
+#define NULL_PTR 0
+#define CK_DEFINE_FUNCTION(returnType, name) returnType name
+#define CK_DECLARE_FUNCTION(returnType, name) returnType name
+#define CK_DECLARE_FUNCTION_POINTER(returnType, name) returnType(CK_PTR name)
+#define CK_CALLBACK_FUNCTION(returnType, name) returnType(CK_PTR name)
+#include "pkcs11/v2.40/pkcs11.h"
+
+/**
+ * pkcs11_private.h
+ * This file declares symbols that are private to aws-c-io but need to be
+ * accessed from multiple .c files.
+ *
+ * NOTE: Not putting this file under `include/private/...` like we usually
+ * do with private headers because it breaks aws-crt-swift. Swift was trying
+ * to compile each file under include/, but the official PKCS#11 header files
+ * are too weird and break it.
+ */
+
+struct aws_pkcs11_lib;
+struct aws_string;
+
+enum aws_tls_hash_algorithm {
+ AWS_TLS_HASH_UNKNOWN = -1,
+ AWS_TLS_HASH_SHA1,
+ AWS_TLS_HASH_SHA224,
+ AWS_TLS_HASH_SHA256,
+ AWS_TLS_HASH_SHA384,
+ AWS_TLS_HASH_SHA512,
+};
+
+enum aws_tls_signature_algorithm {
+ AWS_TLS_SIGNATURE_UNKNOWN = -1,
+ AWS_TLS_SIGNATURE_RSA,
+ AWS_TLS_SIGNATURE_ECDSA,
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Return c-string for PKCS#11 CKR_* constant.
+ * For use in tests only.
+ */
+AWS_IO_API
+const char *aws_pkcs11_ckr_str(CK_RV rv);
+
+/**
+ * Return the raw function list.
+ * For use in tests only.
+ */
+AWS_IO_API
+CK_FUNCTION_LIST *aws_pkcs11_lib_get_function_list(struct aws_pkcs11_lib *pkcs11_lib);
+
+/**
+ * Find the slot that meets all criteria:
+ * - has a token
+ * - if match_slot_id is non-null, then slot IDs must match
+ * - if match_token_label is non-null, then labels must match
+ * The function fails unless it finds exactly one slot meeting all criteria.
+ */
+AWS_IO_API
+int aws_pkcs11_lib_find_slot_with_token(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ const uint64_t *match_slot_id,
+ const struct aws_string *match_token_label,
+ CK_SLOT_ID *out_slot_id);
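+
+/* Minimal usage sketch (assumes an already-loaded aws_pkcs11_lib; the token
+ * label and error handling are placeholders, not part of this API's docs):
+ *
+ *   CK_SLOT_ID slot_id;
+ *   struct aws_string *label = aws_string_new_from_c_str(alloc, "my-token");
+ *   if (aws_pkcs11_lib_find_slot_with_token(pkcs11_lib, NULL, label, &slot_id)) {
+ *       // zero or multiple matching slots; inspect aws_last_error()
+ *   }
+ *   aws_string_destroy(label);
+ */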
+
+AWS_IO_API
+int aws_pkcs11_lib_open_session(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ CK_SLOT_ID slot_id,
+ CK_SESSION_HANDLE *out_session_handle);
+
+AWS_IO_API
+void aws_pkcs11_lib_close_session(struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle);
+
+AWS_IO_API
+int aws_pkcs11_lib_login_user(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ CK_SESSION_HANDLE session_handle,
+ const struct aws_string *optional_user_pin);
+
+/**
+ * Find the object that meets all criteria:
+ * - is a private key
+ * - if match_label is non-null, then labels must match
+ * The function fails unless it finds exactly one object meeting all criteria.
+ */
+AWS_IO_API
+int aws_pkcs11_lib_find_private_key(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ CK_SESSION_HANDLE session_handle,
+ const struct aws_string *match_label,
+ CK_OBJECT_HANDLE *out_key_handle,
+ CK_KEY_TYPE *out_key_type);
+
+/**
+ * Decrypt the encrypted data.
+ * out_data should be passed in uninitialized.
+ * If successful, out_data will be initialized and contain the recovered data.
+ */
+AWS_IO_API
+int aws_pkcs11_lib_decrypt(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ CK_SESSION_HANDLE session_handle,
+ CK_OBJECT_HANDLE key_handle,
+ CK_KEY_TYPE key_type,
+ struct aws_byte_cursor encrypted_data,
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *out_data);
+
+/**
+ * Sign a digest with the private key during TLS negotiation.
+ * out_signature should be passed in uninitialized.
+ * If successful, out_signature will be initialized and contain the signature.
+ */
+AWS_IO_API
+int aws_pkcs11_lib_sign(
+ struct aws_pkcs11_lib *pkcs11_lib,
+ CK_SESSION_HANDLE session_handle,
+ CK_OBJECT_HANDLE key_handle,
+ CK_KEY_TYPE key_type,
+ struct aws_byte_cursor digest_data,
+ struct aws_allocator *allocator,
+ enum aws_tls_hash_algorithm digest_alg,
+ enum aws_tls_signature_algorithm signature_alg,
+ struct aws_byte_buf *out_signature);
+
+/**
+ * Get the DER-encoded DigestInfo value to be prefixed to the hash, used for RSA signing.
+ * See https://tools.ietf.org/html/rfc3447#page-43
+ */
+AWS_IO_API
+int aws_get_prefix_to_rsa_sig(enum aws_tls_hash_algorithm digest_alg, struct aws_byte_cursor *out_prefix);
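+
+/* For example (per RFC 8017, Section 9.2), the SHA-256 DigestInfo prefix is
+ *   30 31 30 0d 06 09 60 86 48 01 65 03 04 02 01 05 00 04 20
+ * so the value signed with raw CKM_RSA_PKCS is that prefix followed by the
+ * 32-byte digest. */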
+
+/**
+ * ASN.1 DER encode a big unsigned integer. Note that the source integer may be zero-padded, and its significant
+ * byte may have the most significant bit set. The encoded form is canonical and unambiguous: a leading zero byte
+ * is added when needed so the most significant bit of the encoding is never set.
+ */
+AWS_IO_API
+int aws_pkcs11_asn1_enc_ubigint(struct aws_byte_buf *const buffer, struct aws_byte_cursor bigint);
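+
+/* For example: the bytes 0x00 0x80 (zero-padded, significant byte with its
+ * high bit set) encode as the DER INTEGER 02 02 00 80, while 0x7F encodes
+ * as 02 01 7F. */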
+
+/**
+ * Given enum, return string like: AWS_TLS_HASH_SHA256 -> "SHA256"
+ */
+AWS_IO_API
+const char *aws_tls_hash_algorithm_str(enum aws_tls_hash_algorithm hash);
+
+/**
+ * Given enum, return string like: AWS_TLS_SIGNATURE_RSA -> "RSA"
+ */
+AWS_IO_API
+const char *aws_tls_signature_algorithm_str(enum aws_tls_signature_algorithm signature);
+
+AWS_EXTERN_C_END
+#endif /* AWS_IO_PKCS11_PRIVATE_H */
diff --git a/contrib/restricted/aws/aws-c-io/source/pki_utils.c b/contrib/restricted/aws/aws-c-io/source/pki_utils.c
index e8b3719089..8deb0aabcf 100644
--- a/contrib/restricted/aws/aws-c-io/source/pki_utils.c
+++ b/contrib/restricted/aws/aws-c-io/source/pki_utils.c
@@ -2,7 +2,7 @@
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
-#include <aws/io/pki_utils.h>
+#include <aws/io/private/pki_utils.h>
#include <aws/common/encoding.h>
diff --git a/contrib/restricted/aws/aws-c-io/source/posix/file_utils.c b/contrib/restricted/aws/aws-c-io/source/posix/file_utils.c
deleted file mode 100644
index fcb96260eb..0000000000
--- a/contrib/restricted/aws/aws-c-io/source/posix/file_utils.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/io/file_utils.h>
-
-#include <aws/common/environment.h>
-#include <aws/common/string.h>
-
-#include <errno.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-char aws_get_platform_directory_separator(void) {
- return '/';
-}
-
-AWS_STATIC_STRING_FROM_LITERAL(s_home_env_var, "HOME");
-
-struct aws_string *aws_get_home_directory(struct aws_allocator *allocator) {
-
- /* ToDo: check getpwuid_r if environment check fails */
- struct aws_string *home_env_var_value = NULL;
- if (aws_get_environment_value(allocator, s_home_env_var, &home_env_var_value) == 0 && home_env_var_value != NULL) {
- return home_env_var_value;
- }
-
- return NULL;
-}
-
-bool aws_path_exists(const char *path) {
- struct stat buffer;
- return stat(path, &buffer) == 0;
-}
-
-int aws_fseek(FILE *file, aws_off_t offset, int whence) {
-
- int result =
-#if _FILE_OFFSET_BITS == 64 || _POSIX_C_SOURCE >= 200112L
- fseeko(file, offset, whence);
-#else
- fseek(file, offset, whence);
-#endif
-
- if (result != 0) {
- return aws_translate_and_raise_io_error(errno);
- }
-
- return AWS_OP_SUCCESS;
-}
-
-int aws_file_get_length(FILE *file, int64_t *length) {
-
- struct stat file_stats;
-
- int fd = fileno(file);
- if (fd == -1) {
- return aws_raise_error(AWS_IO_INVALID_FILE_HANDLE);
- }
-
- if (fstat(fd, &file_stats)) {
- return aws_translate_and_raise_io_error(errno);
- }
-
- *length = file_stats.st_size;
-
- return AWS_OP_SUCCESS;
-}
diff --git a/contrib/restricted/aws/aws-c-io/source/posix/host_resolver.c b/contrib/restricted/aws/aws-c-io/source/posix/host_resolver.c
index 6594723bb8..3cf934b489 100644
--- a/contrib/restricted/aws/aws-c-io/source/posix/host_resolver.c
+++ b/contrib/restricted/aws/aws-c-io/source/posix/host_resolver.c
@@ -31,7 +31,7 @@ int aws_default_dns_resolve(
AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolving host %s", hostname_cstr);
/* Android would prefer NO HINTS IF YOU DON'T MIND, SIR */
-#ifdef ANDROID
+#if defined(ANDROID)
int err_code = getaddrinfo(hostname_cstr, NULL, NULL, &result);
#else
struct addrinfo hints;
@@ -44,7 +44,8 @@ int aws_default_dns_resolve(
#endif
if (err_code) {
- AWS_LOGF_ERROR(AWS_LS_IO_DNS, "static: getaddrinfo failed with error_code %d", err_code);
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_DNS, "static: getaddrinfo failed with error_code %d: %s", err_code, gai_strerror(err_code));
goto clean_up;
}
diff --git a/contrib/restricted/aws/aws-c-io/source/posix/socket.c b/contrib/restricted/aws/aws-c-io/source/posix/socket.c
index 5f11cdff52..788ddaa986 100644
--- a/contrib/restricted/aws/aws-c-io/source/posix/socket.c
+++ b/contrib/restricted/aws/aws-c-io/source/posix/socket.c
@@ -22,11 +22,16 @@
#include <sys/types.h>
#include <unistd.h>
+/*
+ * On OS X, suppress SIGPIPE signals via a flag to setsockopt().
+ * On Linux, suppress SIGPIPE signals via a flag to send().
+ */
#if defined(__MACH__)
-# define NO_SIGNAL SO_NOSIGPIPE
+# define NO_SIGNAL_SOCK_OPT SO_NOSIGPIPE
+# define NO_SIGNAL_SEND 0
# define TCP_KEEPIDLE TCP_KEEPALIVE
#else
-# define NO_SIGNAL MSG_NOSIGNAL
+# define NO_SIGNAL_SEND MSG_NOSIGNAL
#endif
/* This isn't defined on ancient linux distros (breaking the builds).
@@ -160,15 +165,26 @@ struct posix_socket_connect_args {
struct posix_socket {
struct aws_linked_list write_queue;
+ struct aws_linked_list written_queue;
+ struct aws_task written_task;
struct posix_socket_connect_args *connect_args;
- bool write_in_progress;
+ /* Note that only the posix_socket impl part is refcounted.
+ * The public aws_socket can be a stack variable and cleaned up synchronously
+ * (by blocking until the event-loop cleans up the impl part).
+ * In hindsight, aws_socket should have been heap-allocated and refcounted, but alas */
+ struct aws_ref_count internal_refcount;
+ struct aws_allocator *allocator;
+ bool written_task_scheduled;
bool currently_subscribed;
bool continue_accept;
- bool currently_in_event;
- bool clean_yourself_up;
bool *close_happened;
};
+static void s_socket_destroy_impl(void *user_data) {
+ struct posix_socket *socket_impl = user_data;
+ aws_mem_release(socket_impl->allocator, socket_impl);
+}
+
static int s_socket_init(
struct aws_socket *socket,
struct aws_allocator *alloc,
@@ -204,11 +220,11 @@ static int s_socket_init(
}
aws_linked_list_init(&posix_socket->write_queue);
- posix_socket->write_in_progress = false;
+ aws_linked_list_init(&posix_socket->written_queue);
posix_socket->currently_subscribed = false;
posix_socket->continue_accept = false;
- posix_socket->currently_in_event = false;
- posix_socket->clean_yourself_up = false;
+ aws_ref_count_init(&posix_socket->internal_refcount, posix_socket, s_socket_destroy_impl);
+ posix_socket->allocator = alloc;
posix_socket->connect_args = NULL;
posix_socket->close_happened = NULL;
socket->impl = posix_socket;
@@ -225,22 +241,22 @@ void aws_socket_clean_up(struct aws_socket *socket) {
/* protect from double clean */
return;
}
+
+ int fd_for_logging = socket->io_handle.data.fd; /* socket's fd gets reset before final log */
+ (void)fd_for_logging;
+
if (aws_socket_is_open(socket)) {
- AWS_LOGF_DEBUG(
- AWS_LS_IO_SOCKET, "id=%p fd=%d: is still open, closing...", (void *)socket, socket->io_handle.data.fd);
+ AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: is still open, closing...", (void *)socket, fd_for_logging);
aws_socket_close(socket);
}
struct posix_socket *socket_impl = socket->impl;
- if (!socket_impl->currently_in_event) {
- aws_mem_release(socket->allocator, socket->impl);
- } else {
+ if (aws_ref_count_release(&socket_impl->internal_refcount) != 0) {
AWS_LOGF_DEBUG(
AWS_LS_IO_SOCKET,
"id=%p fd=%d: is still pending io letting it dangle and cleaning up later.",
(void *)socket,
- socket->io_handle.data.fd);
- socket_impl->clean_yourself_up = true;
+ fd_for_logging);
}
AWS_ZERO_STRUCT(*socket);
@@ -1083,12 +1099,14 @@ int aws_socket_stop_accept(struct aws_socket *socket) {
AWS_LS_IO_SOCKET, "id=%p fd=%d: stopping accepting new connections", (void *)socket, socket->io_handle.data.fd);
if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) {
- struct stop_accept_args args = {.mutex = AWS_MUTEX_INIT,
- .condition_variable = AWS_CONDITION_VARIABLE_INIT,
- .invoked = false,
- .socket = socket,
- .ret_code = AWS_OP_SUCCESS,
- .task = {.fn = s_stop_accept_task}};
+ struct stop_accept_args args = {
+ .mutex = AWS_MUTEX_INIT,
+ .condition_variable = AWS_CONDITION_VARIABLE_INIT,
+ .invoked = false,
+ .socket = socket,
+ .ret_code = AWS_OP_SUCCESS,
+ .task = {.fn = s_stop_accept_task},
+ };
AWS_LOGF_INFO(
AWS_LS_IO_SOCKET,
"id=%p fd=%d: stopping accepting new connections from a different thread than "
@@ -1145,18 +1163,18 @@ int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_op
socket->options = *options;
+#ifdef NO_SIGNAL_SOCK_OPT
int option_value = 1;
- if (AWS_UNLIKELY(
- setsockopt(socket->io_handle.data.fd, SOL_SOCKET, NO_SIGNAL, &option_value, sizeof(option_value)))) {
+ if (AWS_UNLIKELY(setsockopt(
+ socket->io_handle.data.fd, SOL_SOCKET, NO_SIGNAL_SOCK_OPT, &option_value, sizeof(option_value)))) {
AWS_LOGF_WARN(
AWS_LS_IO_SOCKET,
- "id=%p fd=%d: setsockopt() for NO_SIGNAL failed with errno %d. If you are having SIGPIPE signals thrown, "
- "you may"
- " want to install a signal trap in your application layer.",
+ "id=%p fd=%d: setsockopt() for NO_SIGNAL_SOCK_OPT failed with errno %d.",
(void *)socket,
socket->io_handle.data.fd,
errno);
}
+#endif /* NO_SIGNAL_SOCK_OPT */
int reuse = 1;
if (AWS_UNLIKELY(setsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(int)))) {
@@ -1229,6 +1247,7 @@ struct write_request {
void *write_user_data;
struct aws_linked_list_node node;
size_t original_buffer_len;
+ int error_code;
};
struct posix_socket_close_args {
@@ -1264,6 +1283,7 @@ static void s_close_task(struct aws_task *task, void *arg, enum aws_task_status
int aws_socket_close(struct aws_socket *socket) {
struct posix_socket *socket_impl = socket->impl;
AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: closing", (void *)socket, socket->io_handle.data.fd);
+ struct aws_event_loop *event_loop = socket->event_loop;
if (socket->event_loop) {
/* don't freak out on me, this almost never happens, and never occurs inside a channel
* it only gets hit from a listening socket shutting down or from a unit test. */
@@ -1292,12 +1312,14 @@ int aws_socket_close(struct aws_socket *socket) {
.arg = &args,
};
+ int fd_for_logging = socket->io_handle.data.fd; /* socket's fd gets reset before final log */
+ (void)fd_for_logging;
+
aws_mutex_lock(&args.mutex);
aws_event_loop_schedule_task_now(socket->event_loop, &close_task);
aws_condition_variable_wait_pred(&args.condition_variable, &args.mutex, s_close_predicate, &args);
aws_mutex_unlock(&args.mutex);
- AWS_LOGF_INFO(
- AWS_LS_IO_SOCKET, "id=%p fd=%d: close task completed.", (void *)socket, socket->io_handle.data.fd);
+ AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: close task completed.", (void *)socket, fd_for_logging);
if (args.ret_code) {
return aws_raise_error(args.ret_code);
}
@@ -1334,14 +1356,25 @@ int aws_socket_close(struct aws_socket *socket) {
socket->io_handle.data.fd = -1;
socket->state = CLOSED;
- /* after close, just go ahead and clear out the pending writes queue
- * and tell the user they were cancelled. */
+ /* ensure callbacks for pending writes fire (in order) before this close function returns */
+
+ if (socket_impl->written_task_scheduled) {
+ aws_event_loop_cancel_task(event_loop, &socket_impl->written_task);
+ }
+
+ while (!aws_linked_list_empty(&socket_impl->written_queue)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue);
+ struct write_request *write_request = AWS_CONTAINER_OF(node, struct write_request, node);
+ size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len;
+ write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data);
+ aws_mem_release(socket->allocator, write_request);
+ }
+
while (!aws_linked_list_empty(&socket_impl->write_queue)) {
struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->write_queue);
struct write_request *write_request = AWS_CONTAINER_OF(node, struct write_request, node);
-
- write_request->written_fn(
- socket, AWS_IO_SOCKET_CLOSED, write_request->original_buffer_len, write_request->write_user_data);
+ size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len;
+ write_request->written_fn(socket, AWS_IO_SOCKET_CLOSED, bytes_written, write_request->write_user_data);
aws_mem_release(socket->allocator, write_request);
}
}
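
Behavior note for the hunk above: aws_socket_close() now reports how many bytes of each pending request actually went out (original_buffer_len minus the remaining cursor length) instead of claiming the full buffer was written, and the completion callbacks fire in order before close returns. A minimal sketch of a callback written against that contract; only the aws_socket_on_write_completed_fn signature comes from aws-c-io, the counter is illustrative:

    /* Illustrative callback; total_flushed is caller-owned state, not aws-c-io API. */
    static void s_on_write_complete(struct aws_socket *sock, int error_code, size_t bytes_written, void *user_data) {
        (void)sock;
        size_t *total_flushed = user_data;
        *total_flushed += bytes_written; /* may be partial if the socket closed mid-write */
        if (error_code == AWS_IO_SOCKET_CLOSED) {
            /* delivered synchronously from aws_socket_close(), per the loop above */
        }
    }
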
@@ -1367,21 +1400,53 @@ int aws_socket_shutdown_dir(struct aws_socket *socket, enum aws_channel_directio
return AWS_OP_SUCCESS;
}
+static void s_written_task(struct aws_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ (void)status;
+
+ struct aws_socket *socket = arg;
+ struct posix_socket *socket_impl = socket->impl;
+
+ socket_impl->written_task_scheduled = false;
+
+    /* this is to handle a race condition when a callback kicks off a cleanup, or the user decides
+     * to close the socket based on something they read (SSL validation failed, for example).
+     * if clean_up happens while internal_refcount > 0, socket_impl stays allocated until the count drops to zero */
+ aws_ref_count_acquire(&socket_impl->internal_refcount);
+
+ /* Notes about weird loop:
+ * 1) Only process the initial contents of queue when this task is run,
+ * ignoring any writes queued during delivery.
+ * If we simply looped until the queue was empty, we could get into a
+ * synchronous loop of completing and writing and completing and writing...
+ * and it would be tough for multiple sockets to share an event-loop fairly.
+ * 2) Check if queue is empty with each iteration.
+ * If user calls close() from the callback, close() will process all
+ * nodes in the written_queue, and the queue will be empty when the
+ * callstack gets back to here. */
+ if (!aws_linked_list_empty(&socket_impl->written_queue)) {
+ struct aws_linked_list_node *stop_after = aws_linked_list_back(&socket_impl->written_queue);
+ do {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue);
+ struct write_request *write_request = AWS_CONTAINER_OF(node, struct write_request, node);
+ size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len;
+ write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data);
+ aws_mem_release(socket_impl->allocator, write_request);
+ if (node == stop_after) {
+ break;
+ }
+ } while (!aws_linked_list_empty(&socket_impl->written_queue));
+ }
+
+ aws_ref_count_release(&socket_impl->internal_refcount);
+}
+
/* this gets called in two scenarios.
* 1st scenario, someone called aws_socket_write() and we want to try writing now, so an error can be returned
* immediately if something bad has happened to the socket. In this case, `parent_request` is set.
* 2nd scenario, the event loop notified us that the socket went writable. In this case `parent_request` is NULL */
static int s_process_write_requests(struct aws_socket *socket, struct write_request *parent_request) {
struct posix_socket *socket_impl = socket->impl;
- struct aws_allocator *allocator = socket->allocator;
-
- AWS_LOGF_TRACE(
- AWS_LS_IO_SOCKET, "id=%p fd=%d: processing write requests.", (void *)socket, socket->io_handle.data.fd);
-
- /* there's a potential deadlock where we notify the user that we wrote some data, the user
- * says, "cool, now I can write more and then immediately calls aws_socket_write(). We need to make sure
- * that we don't allow reentrancy in that case. */
- socket_impl->write_in_progress = true;
if (parent_request) {
AWS_LOGF_TRACE(
@@ -1389,7 +1454,6 @@ static int s_process_write_requests(struct aws_socket *socket, struct write_requ
"id=%p fd=%d: processing write requests, called from aws_socket_write",
(void *)socket,
socket->io_handle.data.fd);
- socket_impl->currently_in_event = true;
} else {
AWS_LOGF_TRACE(
AWS_LS_IO_SOCKET,
@@ -1401,6 +1465,7 @@ static int s_process_write_requests(struct aws_socket *socket, struct write_requ
bool purge = false;
int aws_error = AWS_OP_SUCCESS;
bool parent_request_failed = false;
+ bool pushed_to_written_queue = false;
/* if a close call happens in the middle, this queue will have been cleaned out from under us. */
while (!aws_linked_list_empty(&socket_impl->write_queue)) {
@@ -1415,8 +1480,8 @@ static int s_process_write_requests(struct aws_socket *socket, struct write_requ
(unsigned long long)write_request->original_buffer_len,
(unsigned long long)write_request->cursor_cpy.len);
- ssize_t written =
- send(socket->io_handle.data.fd, write_request->cursor_cpy.ptr, write_request->cursor_cpy.len, NO_SIGNAL);
+ ssize_t written = send(
+ socket->io_handle.data.fd, write_request->cursor_cpy.ptr, write_request->cursor_cpy.len, NO_SIGNAL_SEND);
AWS_LOGF_TRACE(
AWS_LS_IO_SOCKET,
@@ -1472,9 +1537,9 @@ static int s_process_write_requests(struct aws_socket *socket, struct write_requ
AWS_LS_IO_SOCKET, "id=%p fd=%d: write request completed", (void *)socket, socket->io_handle.data.fd);
aws_linked_list_remove(node);
- write_request->written_fn(
- socket, AWS_OP_SUCCESS, write_request->original_buffer_len, write_request->write_user_data);
- aws_mem_release(allocator, write_request);
+ write_request->error_code = AWS_ERROR_SUCCESS;
+ aws_linked_list_push_back(&socket_impl->written_queue, node);
+ pushed_to_written_queue = true;
}
}
@@ -1487,22 +1552,19 @@ static int s_process_write_requests(struct aws_socket *socket, struct write_requ
* as the user will be able to rely on the return value from aws_socket_write() */
if (write_request == parent_request) {
parent_request_failed = true;
+ aws_mem_release(socket->allocator, write_request);
} else {
- write_request->written_fn(socket, aws_error, 0, write_request->write_user_data);
+ write_request->error_code = aws_error;
+ aws_linked_list_push_back(&socket_impl->written_queue, node);
+ pushed_to_written_queue = true;
}
-
- aws_mem_release(socket->allocator, write_request);
}
}
- socket_impl->write_in_progress = false;
-
- if (parent_request) {
- socket_impl->currently_in_event = false;
- }
-
- if (socket_impl->clean_yourself_up) {
- aws_mem_release(allocator, socket_impl);
+ if (pushed_to_written_queue && !socket_impl->written_task_scheduled) {
+ socket_impl->written_task_scheduled = true;
+ aws_task_init(&socket_impl->written_task, s_written_task, socket, "socket_written_task");
+ aws_event_loop_schedule_task_now(socket->event_loop, &socket_impl->written_task);
}
/* Only report error if aws_socket_write() invoked this function and its write_request failed */
@@ -1521,15 +1583,14 @@ static void s_on_socket_io_event(
void *user_data) {
(void)event_loop;
(void)handle;
- /* this is to handle a race condition when an error kicks off a cleanup, or the user decides
- * to close the socket based on something they read (SSL validation failed for example).
- * if clean_up happens when currently_in_event is true, socket_impl is kept dangling but currently
- * subscribed is set to false. */
struct aws_socket *socket = user_data;
struct posix_socket *socket_impl = socket->impl;
- struct aws_allocator *allocator = socket->allocator;
- socket_impl->currently_in_event = true;
+    /* this is to handle a race condition when an error kicks off a cleanup, or the user decides
+     * to close the socket based on something they read (SSL validation failed, for example).
+     * if clean_up happens while internal_refcount > 0, socket_impl stays allocated but currently
+     * subscribed is set to false. */
+ aws_ref_count_acquire(&socket_impl->internal_refcount);
if (events & AWS_IO_EVENT_TYPE_REMOTE_HANG_UP || events & AWS_IO_EVENT_TYPE_CLOSED) {
aws_raise_error(AWS_IO_SOCKET_CLOSED);
@@ -1565,11 +1626,7 @@ static void s_on_socket_io_event(
}
end_check:
- socket_impl->currently_in_event = false;
-
- if (socket_impl->clean_yourself_up) {
- aws_mem_release(allocator, socket_impl);
- }
+ aws_ref_count_release(&socket_impl->internal_refcount);
}
int aws_socket_assign_to_event_loop(struct aws_socket *socket, struct aws_event_loop *event_loop) {
@@ -1666,6 +1723,8 @@ int aws_socket_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size
}
ssize_t read_val = read(socket->io_handle.data.fd, buffer->buffer + buffer->len, buffer->capacity - buffer->len);
+ int error = errno;
+
AWS_LOGF_TRACE(
AWS_LS_IO_SOCKET, "id=%p fd=%d: read of %d", (void *)socket, socket->io_handle.data.fd, (int)read_val);
@@ -1688,7 +1747,6 @@ int aws_socket_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size
return AWS_OP_SUCCESS;
}
- int error = errno;
#if defined(EWOULDBLOCK)
if (error == EAGAIN || error == EWOULDBLOCK) {
#else
@@ -1749,12 +1807,7 @@ int aws_socket_write(
write_request->cursor_cpy = *cursor;
aws_linked_list_push_back(&socket_impl->write_queue, &write_request->node);
- /* avoid reentrancy when a user calls write after receiving their completion callback. */
- if (!socket_impl->write_in_progress) {
- return s_process_write_requests(socket, write_request);
- }
-
- return AWS_OP_SUCCESS;
+ return s_process_write_requests(socket, write_request);
}
int aws_socket_get_error(struct aws_socket *socket) {
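
The reentrancy guard removed above is safe to drop because completion callbacks are now delivered from the scheduled s_written_task rather than from inside s_process_write_requests, so a callback that immediately writes again no longer re-enters the send loop. A hedged sketch of the now-safe pattern; the chunk queue and its pop helper are hypothetical:

    /* Illustrative: issuing the next write from a completion callback. */
    static void s_write_next_chunk(struct aws_socket *sock, int error_code, size_t bytes_written, void *user_data) {
        (void)bytes_written;
        struct my_chunk_queue *queue = user_data; /* hypothetical caller-owned queue */
        struct aws_byte_cursor next;
        if (error_code == AWS_ERROR_SUCCESS && my_chunk_queue_pop(queue, &next)) { /* hypothetical */
            aws_socket_write(sock, &next, s_write_next_chunk, queue);
        }
    }
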
diff --git a/contrib/restricted/aws/aws-c-io/source/retry_strategy.c b/contrib/restricted/aws/aws-c-io/source/retry_strategy.c
index 69d444482b..ff1288d142 100644
--- a/contrib/restricted/aws/aws-c-io/source/retry_strategy.c
+++ b/contrib/restricted/aws/aws-c-io/source/retry_strategy.c
@@ -9,10 +9,12 @@ void aws_retry_strategy_acquire(struct aws_retry_strategy *retry_strategy) {
}
void aws_retry_strategy_release(struct aws_retry_strategy *retry_strategy) {
- size_t ref_count = aws_atomic_fetch_sub_explicit(&retry_strategy->ref_count, 1, aws_memory_order_seq_cst);
+ if (retry_strategy) {
+ size_t ref_count = aws_atomic_fetch_sub_explicit(&retry_strategy->ref_count, 1, aws_memory_order_seq_cst);
- if (ref_count == 1) {
- retry_strategy->vtable->destroy(retry_strategy);
+ if (ref_count == 1) {
+ retry_strategy->vtable->destroy(retry_strategy);
+ }
}
}
@@ -39,7 +41,7 @@ int aws_retry_strategy_schedule_retry(
return token->retry_strategy->vtable->schedule_retry(token, error_type, retry_ready, user_data);
}
-int aws_retry_strategy_token_record_success(struct aws_retry_token *token) {
+int aws_retry_token_record_success(struct aws_retry_token *token) {
AWS_PRECONDITION(token);
AWS_PRECONDITION(token->retry_strategy);
AWS_PRECONDITION(token->retry_strategy->vtable->record_success);
@@ -47,11 +49,19 @@ int aws_retry_strategy_token_record_success(struct aws_retry_token *token) {
return token->retry_strategy->vtable->record_success(token);
}
-void aws_retry_strategy_release_retry_token(struct aws_retry_token *token) {
+void aws_retry_token_acquire(struct aws_retry_token *token) {
+ aws_atomic_fetch_add_explicit(&token->ref_count, 1u, aws_memory_order_relaxed);
+}
+
+void aws_retry_token_release(struct aws_retry_token *token) {
if (token) {
AWS_PRECONDITION(token->retry_strategy);
AWS_PRECONDITION(token->retry_strategy->vtable->release_token);
- token->retry_strategy->vtable->release_token(token);
+ size_t prev_count = aws_atomic_fetch_sub_explicit(&token->ref_count, 1u, aws_memory_order_seq_cst);
+
+ if (prev_count == 1u) {
+ token->retry_strategy->vtable->release_token(token);
+ }
}
}
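
With aws_retry_token now reference counted, callers holding a token across an asynchronous attempt should pair the new acquire/release explicitly; the final release is what invokes the vtable's release_token. A minimal sketch under that assumption, with the attempt bookkeeping left hypothetical:

    /* Illustrative token lifecycle around one retried attempt. */
    void begin_attempt(struct aws_retry_token *token) {
        aws_retry_token_acquire(token); /* hold a reference while the attempt is in flight */
    }

    void finish_attempt(struct aws_retry_token *token, bool success) {
        if (success) {
            aws_retry_token_record_success(token);
        }
        aws_retry_token_release(token); /* final reference triggers vtable->release_token */
    }
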
diff --git a/contrib/restricted/aws/aws-c-io/source/s2n/s2n_tls_channel_handler.c b/contrib/restricted/aws/aws-c-io/source/s2n/s2n_tls_channel_handler.c
index 9300125423..e0e776361a 100644
--- a/contrib/restricted/aws/aws-c-io/source/s2n/s2n_tls_channel_handler.c
+++ b/contrib/restricted/aws/aws-c-io/source/s2n/s2n_tls_channel_handler.c
@@ -4,14 +4,20 @@
*/
#include <aws/io/tls_channel_handler.h>
+#include <aws/common/clock.h>
+#include <aws/common/mutex.h>
+
#include <aws/io/channel.h>
#include <aws/io/event_loop.h>
#include <aws/io/file_utils.h>
#include <aws/io/logging.h>
-#include <aws/io/pki_utils.h>
+#include <aws/io/pkcs11.h>
+#include <aws/io/private/pki_utils.h>
#include <aws/io/private/tls_channel_handler_shared.h>
#include <aws/io/statistics.h>
+#include "../pkcs11_private.h"
+
#include <aws/common/encoding.h>
#include <aws/common/string.h>
#include <aws/common/task_scheduler.h>
@@ -32,10 +38,17 @@
static const char *s_default_ca_dir = NULL;
static const char *s_default_ca_file = NULL;
+struct s2n_delayed_shutdown_task {
+ struct aws_channel_task task;
+ struct aws_channel_slot *slot;
+ int error;
+};
+
struct s2n_handler {
struct aws_channel_handler handler;
struct aws_tls_channel_handler_shared shared_state;
struct s2n_connection *connection;
+ struct s2n_ctx *s2n_ctx;
struct aws_channel_slot *slot;
struct aws_linked_list input_queue;
struct aws_byte_buf protocol;
@@ -48,67 +61,108 @@ struct s2n_handler {
aws_tls_on_error_fn *on_error;
void *user_data;
bool advertise_alpn_message;
- bool negotiation_finished;
+ enum {
+ NEGOTIATION_ONGOING,
+ NEGOTIATION_FAILED,
+ NEGOTIATION_SUCCEEDED,
+ } state;
+ struct s2n_delayed_shutdown_task delayed_shutdown_task;
+ struct aws_channel_task async_pkey_task;
};
struct s2n_ctx {
struct aws_tls_ctx ctx;
struct s2n_config *s2n_config;
+
+ /* Only used in special circumstances (ex: have cert but no key, because key is in PKCS#11) */
+ struct s2n_cert_chain_and_key *custom_cert_chain_and_key;
+
+ /* Use a single PKCS#11 session for all TLS connections on this s2n_ctx.
+ * We do this because PKCS#11 tokens may only support a
+ * limited number of sessions (PKCS11-UG-v2.40 section 2.6.7).
+ * If this one shared session turns out to be a severe bottleneck,
+ * we could look into other setups (ex: put session on its own thread,
+ * 1 session per event-loop, 1 session per connection, etc).
+ *
+ * The lock must be held while performing session operations.
+ * Otherwise, it would not be safe for multiple threads to share a
+ * session (PKCS11-UG-v2.40 section 2.6.7). The lock isn't needed for
+ * setup and teardown though, since we ensure nothing parallel is going
+ * on at these times */
+ struct {
+ struct aws_pkcs11_lib *lib;
+ struct aws_mutex session_lock;
+ CK_SESSION_HANDLE session_handle;
+ CK_OBJECT_HANDLE private_key_handle;
+ CK_KEY_TYPE private_key_type;
+ } pkcs11;
};
+AWS_STATIC_STRING_FROM_LITERAL(s_debian_path, "/etc/ssl/certs");
+AWS_STATIC_STRING_FROM_LITERAL(s_rhel_path, "/etc/pki/tls/certs");
+AWS_STATIC_STRING_FROM_LITERAL(s_android_path, "/system/etc/security/cacerts");
+AWS_STATIC_STRING_FROM_LITERAL(s_free_bsd_path, "/usr/local/share/certs");
+AWS_STATIC_STRING_FROM_LITERAL(s_net_bsd_path, "/etc/openssl/certs");
+
static const char *s_determine_default_pki_dir(void) {
/* debian variants */
- if (aws_path_exists("/etc/ssl/certs")) {
- return "/etc/ssl/certs";
+ if (aws_path_exists(s_debian_path)) {
+ return aws_string_c_str(s_debian_path);
}
/* RHEL variants */
- if (aws_path_exists("/etc/pki/tls/certs")) {
- return "/etc/pki/tls/certs";
+ if (aws_path_exists(s_rhel_path)) {
+ return aws_string_c_str(s_rhel_path);
}
/* android */
- if (aws_path_exists("/system/etc/security/cacerts")) {
- return "/system/etc/security/cacerts";
+ if (aws_path_exists(s_android_path)) {
+ return aws_string_c_str(s_android_path);
}
/* Free BSD */
- if (aws_path_exists("/usr/local/share/certs")) {
- return "/usr/local/share/certs";
+ if (aws_path_exists(s_free_bsd_path)) {
+ return aws_string_c_str(s_free_bsd_path);
}
/* Net BSD */
- if (aws_path_exists("/etc/openssl/certs")) {
- return "/etc/openssl/certs";
+ if (aws_path_exists(s_net_bsd_path)) {
+ return aws_string_c_str(s_net_bsd_path);
}
return NULL;
}
+AWS_STATIC_STRING_FROM_LITERAL(s_debian_ca_file_path, "/etc/ssl/certs/ca-certificates.crt");
+AWS_STATIC_STRING_FROM_LITERAL(s_old_rhel_ca_file_path, "/etc/pki/tls/certs/ca-bundle.crt");
+AWS_STATIC_STRING_FROM_LITERAL(s_open_suse_ca_file_path, "/etc/ssl/ca-bundle.pem");
+AWS_STATIC_STRING_FROM_LITERAL(s_open_elec_ca_file_path, "/etc/pki/tls/cacert.pem");
+AWS_STATIC_STRING_FROM_LITERAL(s_modern_rhel_ca_file_path, "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem");
+
static const char *s_determine_default_pki_ca_file(void) {
/* debian variants */
- if (aws_path_exists("/etc/ssl/certs/ca-certificates.crt")) {
- return "/etc/ssl/certs/ca-certificates.crt";
+ if (aws_path_exists(s_debian_ca_file_path)) {
+ return aws_string_c_str(s_debian_ca_file_path);
}
/* Old RHEL variants */
- if (aws_path_exists("/etc/pki/tls/certs/ca-bundle.crt")) {
- return "/etc/pki/tls/certs/ca-bundle.crt";
+ if (aws_path_exists(s_old_rhel_ca_file_path)) {
+ return aws_string_c_str(s_old_rhel_ca_file_path);
}
/* Open SUSE */
- if (aws_path_exists("/etc/ssl/ca-bundle.pem")) {
- return "/etc/ssl/ca-bundle.pem";
+ if (aws_path_exists(s_open_suse_ca_file_path)) {
+ return aws_string_c_str(s_open_suse_ca_file_path);
}
/* Open ELEC */
- if (aws_path_exists("/etc/pki/tls/cacert.pem")) {
- return "/etc/pki/tls/cacert.pem";
+ if (aws_path_exists(s_open_elec_ca_file_path)) {
+ return aws_string_c_str(s_open_elec_ca_file_path);
}
/* Modern RHEL variants */
- if (aws_path_exists("/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem")) {
- return "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem";
+ if (aws_path_exists(s_modern_rhel_ca_file_path)) {
+ return aws_string_c_str(s_modern_rhel_ca_file_path);
}
return NULL;
@@ -120,7 +174,18 @@ void aws_tls_init_static_state(struct aws_allocator *alloc) {
setenv("S2N_ENABLE_CLIENT_MODE", "1", 1);
setenv("S2N_DONT_MLOCK", "1", 1);
- s2n_init();
+
+ /* Disable atexit behavior, so that s2n_cleanup() fully cleans things up.
+ *
+     * By default, s2n uses an atexit handler and doesn't fully clean up until the program exits.
+     * This can cause a crash if s2n is compiled into a shared library and
+     * that library is unloaded before the atexit handler runs. */
+ s2n_disable_atexit();
+
+ if (s2n_init() != S2N_SUCCESS) {
+ fprintf(stderr, "s2n_init() failed: %d (%s)\n", s2n_errno, s2n_strerror(s2n_errno, "EN"));
+ AWS_FATAL_ASSERT(0 && "s2n_init() failed");
+ }
s_default_ca_dir = s_determine_default_pki_dir();
s_default_ca_file = s_determine_default_pki_ca_file();
@@ -150,6 +215,7 @@ bool aws_tls_is_cipher_pref_supported(enum aws_tls_cipher_pref cipher_pref) {
case AWS_IO_TLS_CIPHER_PREF_KMS_PQ_TLSv1_0_2020_02:
case AWS_IO_TLS_CIPHER_PREF_KMS_PQ_SIKE_TLSv1_0_2020_02:
case AWS_IO_TLS_CIPHER_PREF_KMS_PQ_TLSv1_0_2020_07:
+ case AWS_IO_TLS_CIPHER_PREF_PQ_TLSv1_0_2021_05:
return true;
#endif
@@ -207,20 +273,19 @@ static int s_generic_send(struct s2n_handler *handler, struct aws_byte_buf *buf)
size_t processed = 0;
while (processed < buf->len) {
+ const size_t overhead = aws_channel_slot_upstream_message_overhead(handler->slot);
+ const size_t message_size_hint = (buf->len - processed) + overhead;
struct aws_io_message *message = aws_channel_acquire_message_from_pool(
- handler->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, buf->len - processed);
+ handler->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, message_size_hint);
- if (!message) {
+ if (!message || message->message_data.capacity <= overhead) {
errno = ENOMEM;
return -1;
}
- const size_t overhead = aws_channel_slot_upstream_message_overhead(handler->slot);
- const size_t available_msg_write_capacity = buffer_cursor.len - overhead;
-
- const size_t to_write = message->message_data.capacity > available_msg_write_capacity
- ? available_msg_write_capacity
- : message->message_data.capacity;
+ const size_t available_msg_write_capacity = message->message_data.capacity - overhead;
+ const size_t to_write =
+ available_msg_write_capacity >= buffer_cursor.len ? buffer_cursor.len : available_msg_write_capacity;
struct aws_byte_cursor chunk = aws_byte_cursor_advance(&buffer_cursor, to_write);
if (aws_byte_buf_append(&message->message_data, &chunk)) {
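
To make the corrected sizing concrete: the pool may hand back a smaller message than the size hint, so the usable payload per message is its capacity minus the upstream overhead, clamped to what remains unsent. A worked example with assumed numbers (none of these values appear in the diff):

    /* Illustrative arithmetic only. */
    size_t overhead = 53;        /* suppose the slot reports 53 bytes of framing */
    size_t remaining = 20000;    /* unsent bytes left in buf */
    size_t capacity = 16 * 1024; /* suppose the pool caps the message at 16 KiB */
    size_t available = capacity - overhead;                           /* 16331 */
    size_t to_write = available >= remaining ? remaining : available; /* 16331; loop runs again */
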
@@ -262,7 +327,12 @@ static void s_s2n_handler_destroy(struct aws_channel_handler *handler) {
if (handler) {
struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl;
aws_tls_channel_handler_shared_clean_up(&s2n_handler->shared_state);
- s2n_connection_free(s2n_handler->connection);
+ if (s2n_handler->connection) {
+ s2n_connection_free(s2n_handler->connection);
+ }
+ if (s2n_handler->s2n_ctx) {
+ aws_tls_ctx_release(&s2n_handler->s2n_ctx->ctx);
+ }
aws_mem_release(handler->alloc, (void *)s2n_handler);
}
}
@@ -285,6 +355,8 @@ static void s_on_negotiation_result(
static int s_drive_negotiation(struct aws_channel_handler *handler) {
struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl;
+ AWS_ASSERT(s2n_handler->state == NEGOTIATION_ONGOING);
+
aws_on_drive_tls_negotiation(&s2n_handler->shared_state);
s2n_blocked_status blocked = S2N_NOT_BLOCKED;
@@ -293,7 +365,7 @@ static int s_drive_negotiation(struct aws_channel_handler *handler) {
int s2n_error = s2n_errno;
if (negotiation_code == S2N_ERR_T_OK) {
- s2n_handler->negotiation_finished = true;
+ s2n_handler->state = NEGOTIATION_SUCCEEDED;
const char *protocol = s2n_get_application_protocol(s2n_handler->connection);
if (protocol) {
@@ -348,7 +420,7 @@ static int s_drive_negotiation(struct aws_channel_handler *handler) {
const char *err_str = s2n_strerror_debug(s2n_error, NULL);
(void)err_str;
- s2n_handler->negotiation_finished = false;
+ s2n_handler->state = NEGOTIATION_FAILED;
aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE);
@@ -368,7 +440,10 @@ static void s_negotiation_task(struct aws_channel_task *task, void *arg, aws_tas
if (status == AWS_TASK_STATUS_RUN_READY) {
struct aws_channel_handler *handler = arg;
- s_drive_negotiation(handler);
+ struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl;
+ if (s2n_handler->state == NEGOTIATION_ONGOING) {
+ s_drive_negotiation(handler);
+ }
}
}
@@ -377,7 +452,10 @@ int aws_tls_client_handler_start_negotiation(struct aws_channel_handler *handler
AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: Kicking off TLS negotiation.", (void *)handler)
if (aws_channel_thread_is_callers_thread(s2n_handler->slot->channel)) {
- return s_drive_negotiation(handler);
+ if (s2n_handler->state == NEGOTIATION_ONGOING) {
+ s_drive_negotiation(handler);
+ }
+ return AWS_OP_SUCCESS;
}
aws_channel_task_init(
@@ -394,10 +472,14 @@ static int s_s2n_handler_process_read_message(
struct s2n_handler *s2n_handler = handler->impl;
+ if (AWS_UNLIKELY(s2n_handler->state == NEGOTIATION_FAILED)) {
+ return aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE);
+ }
+
if (message) {
aws_linked_list_push_back(&s2n_handler->input_queue, &message->queueing_handle);
- if (!s2n_handler->negotiation_finished) {
+ if (s2n_handler->state == NEGOTIATION_ONGOING) {
size_t message_len = message->message_data.len;
if (!s_drive_negotiation(handler)) {
aws_channel_slot_increment_read_window(slot, message_len);
@@ -486,7 +568,7 @@ static int s_s2n_handler_process_write_message(
(void)slot;
struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl;
- if (AWS_UNLIKELY(!s2n_handler->negotiation_finished)) {
+ if (AWS_UNLIKELY(s2n_handler->state != NEGOTIATION_SUCCEEDED)) {
return aws_raise_error(AWS_IO_TLS_ERROR_NOT_NEGOTIATED);
}
@@ -510,6 +592,274 @@ static int s_s2n_handler_process_write_message(
return AWS_OP_SUCCESS;
}
+static void s_delayed_shutdown_task_fn(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) {
+ (void)channel_task;
+
+ struct aws_channel_handler *handler = arg;
+ struct s2n_handler *s2n_handler = handler->impl;
+
+ if (status == AWS_TASK_STATUS_RUN_READY) {
+ AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: Delayed shut down in write direction", (void *)handler)
+ s2n_blocked_status blocked;
+        /* make a best effort, but the channel is going away after this run, so you only get one shot anyway */
+ s2n_shutdown(s2n_handler->connection, &blocked);
+ }
+ aws_channel_slot_on_handler_shutdown_complete(
+ s2n_handler->delayed_shutdown_task.slot,
+ AWS_CHANNEL_DIR_WRITE,
+ s2n_handler->delayed_shutdown_task.error,
+ false);
+}
+
+static enum aws_tls_signature_algorithm s_s2n_to_aws_signature_algorithm(s2n_tls_signature_algorithm s2n_alg) {
+ switch (s2n_alg) {
+ case S2N_TLS_SIGNATURE_RSA:
+ return AWS_TLS_SIGNATURE_RSA;
+ case S2N_TLS_SIGNATURE_ECDSA:
+ return AWS_TLS_SIGNATURE_ECDSA;
+ default:
+ return AWS_TLS_SIGNATURE_UNKNOWN;
+ }
+}
+
+static enum aws_tls_hash_algorithm s_s2n_to_aws_hash_algorithm(s2n_tls_hash_algorithm s2n_alg) {
+ switch (s2n_alg) {
+ case (S2N_TLS_HASH_SHA1):
+ return AWS_TLS_HASH_SHA1;
+ case (S2N_TLS_HASH_SHA224):
+ return AWS_TLS_HASH_SHA224;
+ case (S2N_TLS_HASH_SHA256):
+ return AWS_TLS_HASH_SHA256;
+ case (S2N_TLS_HASH_SHA384):
+ return AWS_TLS_HASH_SHA384;
+ case (S2N_TLS_HASH_SHA512):
+ return AWS_TLS_HASH_SHA512;
+ default:
+ return AWS_TLS_HASH_UNKNOWN;
+ }
+}
+
+/* This task performs the PKCS#11 private key operations.
+ * This task is scheduled because the s2n async private key operation is not allowed to complete synchronously */
+static void s_s2n_pkcs11_async_pkey_task(
+ struct aws_channel_task *channel_task,
+ void *arg,
+ enum aws_task_status status) {
+
+ struct s2n_handler *s2n_handler = AWS_CONTAINER_OF(channel_task, struct s2n_handler, async_pkey_task);
+ struct aws_channel_handler *handler = &s2n_handler->handler;
+ struct s2n_async_pkey_op *op = arg;
+ bool success = false;
+
+ uint8_t *input_data = NULL; /* allocated later */
+ struct aws_byte_buf output_buf; /* initialized later */
+ AWS_ZERO_STRUCT(output_buf);
+
+ /* if things started failing since this task was scheduled, just clean up and bail out */
+ if (status != AWS_TASK_STATUS_RUN_READY || s2n_handler->state != NEGOTIATION_ONGOING) {
+ goto clean_up;
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: Running PKCS#11 async pkey task", (void *)handler);
+
+ /* We check all s2n_async_pkey_op functions for success,
+ * but they shouldn't fail if they're called correctly.
+ * Even if the output is bad, the failure will happen later in s2n_negotiate() */
+
+ uint32_t input_size = 0;
+ if (s2n_async_pkey_op_get_input_size(op, &input_size)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed querying s2n async pkey op size", (void *)handler);
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto error;
+ }
+
+ input_data = aws_mem_acquire(handler->alloc, input_size);
+ if (s2n_async_pkey_op_get_input(op, input_data, input_size)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed querying s2n async pkey input", (void *)handler);
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto error;
+ }
+ struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input_data, input_size);
+
+ s2n_async_pkey_op_type op_type = 0;
+ if (s2n_async_pkey_op_get_op_type(op, &op_type)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed querying s2n async pkey op type", (void *)handler);
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto error;
+ }
+
+ /* Gather additional information if this is a SIGN operation */
+ enum aws_tls_signature_algorithm aws_sign_alg = 0;
+ enum aws_tls_hash_algorithm aws_digest_alg = 0;
+ if (op_type == S2N_ASYNC_SIGN) {
+ s2n_tls_signature_algorithm s2n_sign_alg = 0;
+ if (s2n_connection_get_selected_client_cert_signature_algorithm(s2n_handler->connection, &s2n_sign_alg)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed getting s2n client cert signature algorithm", (void *)handler);
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto error;
+ }
+
+ aws_sign_alg = s_s2n_to_aws_signature_algorithm(s2n_sign_alg);
+ if (aws_sign_alg == AWS_TLS_SIGNATURE_UNKNOWN) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS,
+ "id=%p: Cannot sign with s2n_tls_signature_algorithm=%d. Algorithm currently unsupported",
+ (void *)handler,
+ s2n_sign_alg);
+ aws_raise_error(AWS_IO_TLS_SIGNATURE_ALGORITHM_UNSUPPORTED);
+ goto error;
+ }
+
+ s2n_tls_hash_algorithm s2n_digest_alg = 0;
+ if (s2n_connection_get_selected_client_cert_digest_algorithm(s2n_handler->connection, &s2n_digest_alg)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed getting s2n client cert digest algorithm", (void *)handler);
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto error;
+ }
+
+ aws_digest_alg = s_s2n_to_aws_hash_algorithm(s2n_digest_alg);
+ if (aws_digest_alg == AWS_TLS_HASH_UNKNOWN) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS,
+ "id=%p: Cannot sign digest created with s2n_tls_hash_algorithm=%d. Algorithm currently unsupported",
+ (void *)handler,
+ s2n_digest_alg);
+ aws_raise_error(AWS_IO_TLS_DIGEST_ALGORITHM_UNSUPPORTED);
+ goto error;
+ }
+ }
+
+ /*********** BEGIN CRITICAL SECTION ***********/
+ aws_mutex_lock(&s2n_handler->s2n_ctx->pkcs11.session_lock);
+ bool success_while_locked = false;
+
+ switch (op_type) {
+ case S2N_ASYNC_DECRYPT:
+ if (aws_pkcs11_lib_decrypt(
+ s2n_handler->s2n_ctx->pkcs11.lib,
+ s2n_handler->s2n_ctx->pkcs11.session_handle,
+ s2n_handler->s2n_ctx->pkcs11.private_key_handle,
+ s2n_handler->s2n_ctx->pkcs11.private_key_type,
+ input_cursor,
+ handler->alloc,
+ &output_buf)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS,
+ "id=%p: PKCS#11 decrypt failed, error %s",
+ (void *)handler,
+ aws_error_name(aws_last_error()));
+ goto unlock;
+ }
+ break;
+
+ case S2N_ASYNC_SIGN:
+ if (aws_pkcs11_lib_sign(
+ s2n_handler->s2n_ctx->pkcs11.lib,
+ s2n_handler->s2n_ctx->pkcs11.session_handle,
+ s2n_handler->s2n_ctx->pkcs11.private_key_handle,
+ s2n_handler->s2n_ctx->pkcs11.private_key_type,
+ input_cursor,
+ handler->alloc,
+ aws_digest_alg,
+ aws_sign_alg,
+ &output_buf)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS,
+ "id=%p: PKCS#11 sign failed, error %s",
+ (void *)handler,
+ aws_error_name(aws_last_error()));
+ goto unlock;
+ }
+ break;
+
+ default:
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Unknown s2n_async_pkey_op_type:%d", (void *)handler, (int)op_type);
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto unlock;
+ }
+
+ success_while_locked = true;
+unlock:
+ aws_mutex_unlock(&s2n_handler->s2n_ctx->pkcs11.session_lock);
+ /*********** END CRITICAL SECTION ***********/
+
+ if (!success_while_locked) {
+ goto error;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_TLS, "id=%p: PKCS#11 operation complete. output-size:%zu", (void *)handler, output_buf.len);
+
+ if (s2n_async_pkey_op_set_output(op, output_buf.buffer, output_buf.len)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed setting output on s2n async pkey op", (void *)handler);
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto error;
+ }
+
+ if (s2n_async_pkey_op_apply(op, s2n_handler->connection)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed applying s2n async pkey op", (void *)handler);
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto error;
+ }
+
+ /* Success! */
+ success = true;
+ goto clean_up;
+
+error:
+ aws_channel_shutdown(s2n_handler->slot->channel, aws_last_error());
+
+clean_up:
+ s2n_async_pkey_op_free(op);
+ aws_mem_release(handler->alloc, input_data);
+ aws_byte_buf_clean_up(&output_buf);
+
+ if (success) {
+ s_drive_negotiation(handler);
+ }
+}
+
+static int s_s2n_pkcs11_async_pkey_callback(struct s2n_connection *conn, struct s2n_async_pkey_op *op) {
+ struct s2n_handler *s2n_handler = s2n_connection_get_ctx(conn);
+ struct aws_channel_handler *handler = &s2n_handler->handler;
+
+ AWS_ASSERT(conn == s2n_handler->connection);
+ (void)conn;
+
+ /* Schedule a task to do the work.
+ * s2n can't deal with the async private key operation completing synchronously, so we can't just do it now */
+ AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: async pkey callback received, scheduling PKCS#11 task", (void *)handler);
+
+ aws_channel_task_init(&s2n_handler->async_pkey_task, s_s2n_pkcs11_async_pkey_task, op, "s2n_pkcs11_async_pkey_op");
+ aws_channel_schedule_task_now(s2n_handler->slot->channel, &s2n_handler->async_pkey_task);
+
+ return S2N_SUCCESS;
+}
+
+static int s_s2n_do_delayed_shutdown(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ int error_code) {
+ struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl;
+
+ s2n_handler->delayed_shutdown_task.slot = slot;
+ s2n_handler->delayed_shutdown_task.error = error_code;
+
+ uint64_t shutdown_delay = s2n_connection_get_delay(s2n_handler->connection);
+ uint64_t now = 0;
+
+ if (aws_channel_current_clock_time(slot->channel, &now)) {
+ return AWS_OP_ERR;
+ }
+
+ uint64_t shutdown_time = aws_add_u64_saturating(shutdown_delay, now);
+ aws_channel_schedule_task_future(slot->channel, &s2n_handler->delayed_shutdown_task.task, shutdown_time);
+
+ return AWS_OP_SUCCESS;
+}
+
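
Context for the delay honored above: the handler enables S2N_SELF_SERVICE_BLINDING at construction, which makes s2n report, rather than sleep off, the blinding delay it wants before teardown (a timing side-channel mitigation); s2n_connection_get_delay() returns that remaining delay in nanoseconds, so the schedule is simply now plus delay with saturating addition:

    /* Illustrative restatement of the time math; both values are nanoseconds on the channel clock. */
    uint64_t delay_ns = s2n_connection_get_delay(conn);       /* remaining blinding delay */
    uint64_t when = aws_add_u64_saturating(delay_ns, now_ns); /* saturates instead of wrapping */
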
static int s_s2n_handler_shutdown(
struct aws_channel_handler *handler,
struct aws_channel_slot *slot,
@@ -520,15 +870,20 @@ static int s_s2n_handler_shutdown(
if (dir == AWS_CHANNEL_DIR_WRITE) {
if (!abort_immediately && error_code != AWS_IO_SOCKET_CLOSED) {
- AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: Shutting down write direction", (void *)handler)
- s2n_blocked_status blocked;
- /* make a best effort, but the channel is going away after this run, so.... you only get one shot anyways */
- s2n_shutdown(s2n_handler->connection, &blocked);
+ AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: Scheduling delayed write direction shutdown", (void *)handler)
+ if (s_s2n_do_delayed_shutdown(handler, slot, error_code) == AWS_OP_SUCCESS) {
+ return AWS_OP_SUCCESS;
+ }
}
} else {
AWS_LOGF_DEBUG(
AWS_LS_IO_TLS, "id=%p: Shutting down read direction with error code %d", (void *)handler, error_code);
+ /* If negotiation hasn't succeeded yet, it's certainly not going to succeed now */
+ if (s2n_handler->state == NEGOTIATION_ONGOING) {
+ s2n_handler->state = NEGOTIATION_FAILED;
+ }
+
while (!aws_linked_list_empty(&s2n_handler->input_queue)) {
struct aws_linked_list_node *node = aws_linked_list_pop_front(&s2n_handler->input_queue);
struct aws_io_message *message = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle);
@@ -577,7 +932,7 @@ static int s_s2n_handler_increment_read_window(
aws_channel_slot_increment_read_window(slot, window_update_size);
}
- if (s2n_handler->negotiation_finished && !s2n_handler->sequential_tasks.node.next) {
+ if (s2n_handler->state == NEGOTIATION_SUCCEEDED && !s2n_handler->sequential_tasks.node.next) {
/* TLS requires full records before it can decrypt anything. As a result we need to check everything we've
* buffered instead of just waiting on a read from the socket, or we'll hit a deadlock.
*
@@ -684,9 +1039,11 @@ static size_t s_tl_cleanup_key = 0; /* Address of variable serves as key in hash
* This local object is added to the table of every event loop that has a (s2n) tls connection
* added to it at some point in time
*/
-static struct aws_event_loop_local_object s_tl_cleanup_object = {.key = &s_tl_cleanup_key,
- .object = NULL,
- .on_object_removed = NULL};
+static struct aws_event_loop_local_object s_tl_cleanup_object = {
+ .key = &s_tl_cleanup_key,
+ .object = NULL,
+ .on_object_removed = NULL,
+};
static void s_aws_cleanup_s2n_thread_local_state(void *user_data) {
(void)user_data;
@@ -724,23 +1081,22 @@ static struct aws_channel_handler *s_new_tls_handler(
AWS_ASSERT(options->ctx);
struct s2n_handler *s2n_handler = aws_mem_calloc(allocator, 1, sizeof(struct s2n_handler));
- if (!s2n_handler) {
- return NULL;
- }
+ s2n_handler->handler.impl = s2n_handler;
+ s2n_handler->handler.alloc = allocator;
+ s2n_handler->handler.vtable = &s_handler_vtable;
+ s2n_handler->handler.slot = slot;
+
+ aws_tls_ctx_acquire(options->ctx);
+ s2n_handler->s2n_ctx = options->ctx->impl;
- struct s2n_ctx *s2n_ctx = (struct s2n_ctx *)options->ctx->impl;
s2n_handler->connection = s2n_connection_new(mode);
if (!s2n_handler->connection) {
- goto cleanup_s2n_handler;
+ goto cleanup_conn;
}
aws_tls_channel_handler_shared_init(&s2n_handler->shared_state, &s2n_handler->handler, options);
- s2n_handler->handler.impl = s2n_handler;
- s2n_handler->handler.alloc = allocator;
- s2n_handler->handler.vtable = &s_handler_vtable;
- s2n_handler->handler.slot = slot;
s2n_handler->user_data = options->user_data;
s2n_handler->on_data_read = options->on_data_read;
s2n_handler->on_error = options->on_error;
@@ -762,12 +1118,13 @@ static struct aws_channel_handler *s_new_tls_handler(
}
}
- s2n_handler->negotiation_finished = false;
+ s2n_handler->state = NEGOTIATION_ONGOING;
s2n_connection_set_recv_cb(s2n_handler->connection, s_s2n_handler_recv);
s2n_connection_set_recv_ctx(s2n_handler->connection, s2n_handler);
s2n_connection_set_send_cb(s2n_handler->connection, s_s2n_handler_send);
s2n_connection_set_send_ctx(s2n_handler->connection, s2n_handler);
+ s2n_connection_set_ctx(s2n_handler->connection, s2n_handler);
s2n_connection_set_blinding(s2n_handler->connection, S2N_SELF_SERVICE_BLINDING);
if (options->alpn_list) {
@@ -798,7 +1155,7 @@ static struct aws_channel_handler *s_new_tls_handler(
}
}
- if (s2n_connection_set_config(s2n_handler->connection, s2n_ctx->s2n_config)) {
+ if (s2n_connection_set_config(s2n_handler->connection, s2n_handler->s2n_ctx->s2n_config)) {
AWS_LOGF_WARN(
AWS_LS_IO_TLS,
"id=%p: configuration error %s (%s)",
@@ -809,6 +1166,12 @@ static struct aws_channel_handler *s_new_tls_handler(
goto cleanup_conn;
}
+ aws_channel_task_init(
+ &s2n_handler->delayed_shutdown_task.task,
+ s_delayed_shutdown_task_fn,
+ &s2n_handler->handler,
+ "s2n_delayed_shutdown");
+
if (s_s2n_tls_channel_handler_schedule_thread_local_cleanup(slot)) {
goto cleanup_conn;
}
@@ -816,10 +1179,7 @@ static struct aws_channel_handler *s_new_tls_handler(
return &s2n_handler->handler;
cleanup_conn:
- s2n_connection_free(s2n_handler->connection);
-
-cleanup_s2n_handler:
- aws_mem_release(allocator, s2n_handler);
+ s_s2n_handler_destroy(&s2n_handler->handler);
return NULL;
}
@@ -842,11 +1202,85 @@ struct aws_channel_handler *aws_tls_server_handler_new(
static void s_s2n_ctx_destroy(struct s2n_ctx *s2n_ctx) {
if (s2n_ctx != NULL) {
+ if (s2n_ctx->pkcs11.session_handle != 0) {
+ aws_pkcs11_lib_close_session(s2n_ctx->pkcs11.lib, s2n_ctx->pkcs11.session_handle);
+ }
+ aws_mutex_clean_up(&s2n_ctx->pkcs11.session_lock);
+ aws_pkcs11_lib_release(s2n_ctx->pkcs11.lib);
s2n_config_free(s2n_ctx->s2n_config);
+
+ if (s2n_ctx->custom_cert_chain_and_key) {
+ s2n_cert_chain_and_key_free(s2n_ctx->custom_cert_chain_and_key);
+ }
+
aws_mem_release(s2n_ctx->ctx.alloc, s2n_ctx);
}
}
+static int s2n_wall_clock_time_nanoseconds(void *context, uint64_t *time_in_ns) {
+ (void)context;
+ if (aws_sys_clock_get_ticks(time_in_ns)) {
+ *time_in_ns = 0;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int s2n_monotonic_clock_time_nanoseconds(void *context, uint64_t *time_in_ns) {
+ (void)context;
+ if (aws_high_res_clock_get_ticks(time_in_ns)) {
+ *time_in_ns = 0;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int s_tls_ctx_pkcs11_setup(struct s2n_ctx *s2n_ctx, const struct aws_tls_ctx_options *options) {
+    /* PKCS#11 options were already sanitized (i.e. required args were checked) in tls_channel_handler.c */
+
+ /* anything initialized in this function is cleaned up during s_s2n_ctx_destroy()
+ * so don't worry about cleaning up unless it's some tmp heap allocation */
+
+ s2n_ctx->pkcs11.lib = aws_pkcs11_lib_acquire(options->pkcs11.lib); /* cannot fail */
+ aws_mutex_init(&s2n_ctx->pkcs11.session_lock);
+
+ CK_SLOT_ID slot_id = 0;
+ if (aws_pkcs11_lib_find_slot_with_token(
+ s2n_ctx->pkcs11.lib,
+ options->pkcs11.has_slot_id ? &options->pkcs11.slot_id : NULL,
+ options->pkcs11.token_label,
+ &slot_id /*out*/)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_pkcs11_lib_open_session(s2n_ctx->pkcs11.lib, slot_id, &s2n_ctx->pkcs11.session_handle)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_pkcs11_lib_login_user(s2n_ctx->pkcs11.lib, s2n_ctx->pkcs11.session_handle, options->pkcs11.user_pin)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_pkcs11_lib_find_private_key(
+ s2n_ctx->pkcs11.lib,
+ s2n_ctx->pkcs11.session_handle,
+ options->pkcs11.private_key_object_label,
+ &s2n_ctx->pkcs11.private_key_handle /*out*/,
+ &s2n_ctx->pkcs11.private_key_type /*out*/)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_log_and_raise_s2n_errno(const char *msg) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS, "%s: %s (%s)", msg, s2n_strerror(s2n_errno, "EN"), s2n_strerror_debug(s2n_errno, "EN"));
+ aws_raise_error(AWS_IO_TLS_CTX_ERROR);
+}
+
static struct aws_tls_ctx *s_tls_ctx_new(
struct aws_allocator *alloc,
const struct aws_tls_ctx_options *options,
@@ -866,33 +1300,69 @@ static struct aws_tls_ctx *s_tls_ctx_new(
s2n_ctx->ctx.alloc = alloc;
s2n_ctx->ctx.impl = s2n_ctx;
aws_ref_count_init(&s2n_ctx->ctx.ref_count, s2n_ctx, (aws_simple_completion_callback *)s_s2n_ctx_destroy);
- s2n_ctx->s2n_config = s2n_config_new();
+ s2n_ctx->s2n_config = s2n_config_new();
if (!s2n_ctx->s2n_config) {
- goto cleanup_s2n_ctx;
+ s_log_and_raise_s2n_errno("ctx: creation failed");
+ goto cleanup_s2n_config;
}
- switch (options->minimum_tls_version) {
- case AWS_IO_SSLv3:
- s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "CloudFront-SSL-v-3");
- break;
- case AWS_IO_TLSv1:
- s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "CloudFront-TLS-1-0-2014");
- break;
- case AWS_IO_TLSv1_1:
- s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "ELBSecurityPolicy-TLS-1-1-2017-01");
- break;
- case AWS_IO_TLSv1_2:
- s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "ELBSecurityPolicy-TLS-1-2-Ext-2018-06");
- break;
- case AWS_IO_TLSv1_3:
- AWS_LOGF_ERROR(AWS_LS_IO_TLS, "TLS 1.3 is not supported yet.");
- /* sorry guys, we'll add this as soon as s2n does. */
- aws_raise_error(AWS_IO_TLS_VERSION_UNSUPPORTED);
- goto cleanup_s2n_ctx;
- case AWS_IO_TLS_VER_SYS_DEFAULTS:
- default:
- s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "ELBSecurityPolicy-TLS-1-1-2017-01");
+ int set_clock_result = s2n_config_set_wall_clock(s2n_ctx->s2n_config, s2n_wall_clock_time_nanoseconds, NULL);
+ if (set_clock_result != S2N_ERR_T_OK) {
+ s_log_and_raise_s2n_errno("ctx: failed to set wall clock");
+ goto cleanup_s2n_config;
+ }
+
+ set_clock_result = s2n_config_set_monotonic_clock(s2n_ctx->s2n_config, s2n_monotonic_clock_time_nanoseconds, NULL);
+ if (set_clock_result != S2N_ERR_T_OK) {
+ s_log_and_raise_s2n_errno("ctx: failed to set monotonic clock");
+ goto cleanup_s2n_config;
+ }
+
+ if (options->pkcs11.lib != NULL) {
+ /* PKCS#11 integration hasn't been tested with TLS 1.3, so don't use cipher preferences that allow 1.3 */
+ switch (options->minimum_tls_version) {
+ case AWS_IO_SSLv3:
+ s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "CloudFront-SSL-v-3");
+ break;
+ case AWS_IO_TLSv1:
+ s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "CloudFront-TLS-1-0-2014");
+ break;
+ case AWS_IO_TLSv1_1:
+ s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "ELBSecurityPolicy-TLS-1-1-2017-01");
+ break;
+ case AWS_IO_TLSv1_2:
+ s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "ELBSecurityPolicy-TLS-1-2-Ext-2018-06");
+ break;
+ case AWS_IO_TLSv1_3:
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "TLS 1.3 with PKCS#11 is not supported yet.");
+ aws_raise_error(AWS_IO_TLS_VERSION_UNSUPPORTED);
+ goto cleanup_s2n_config;
+ case AWS_IO_TLS_VER_SYS_DEFAULTS:
+ default:
+ s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "ELBSecurityPolicy-TLS-1-1-2017-01");
+ }
+ } else {
+ switch (options->minimum_tls_version) {
+ case AWS_IO_SSLv3:
+ s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "AWS-CRT-SDK-SSLv3.0");
+ break;
+ case AWS_IO_TLSv1:
+ s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "AWS-CRT-SDK-TLSv1.0");
+ break;
+ case AWS_IO_TLSv1_1:
+ s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "AWS-CRT-SDK-TLSv1.1");
+ break;
+ case AWS_IO_TLSv1_2:
+ s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "AWS-CRT-SDK-TLSv1.2");
+ break;
+ case AWS_IO_TLSv1_3:
+ s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "AWS-CRT-SDK-TLSv1.3");
+ break;
+ case AWS_IO_TLS_VER_SYS_DEFAULTS:
+ default:
+ s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "AWS-CRT-SDK-TLSv1.0");
+ }
}
switch (options->cipher_pref) {
@@ -914,118 +1384,148 @@ static struct aws_tls_ctx *s_tls_ctx_new(
case AWS_IO_TLS_CIPHER_PREF_KMS_PQ_TLSv1_0_2020_07:
s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "KMS-PQ-TLS-1-0-2020-07");
break;
+ case AWS_IO_TLS_CIPHER_PREF_PQ_TLSv1_0_2021_05:
+ s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, "PQ-TLS-1-0-2021-05-26");
+ break;
default:
AWS_LOGF_ERROR(AWS_LS_IO_TLS, "Unrecognized TLS Cipher Preference: %d", options->cipher_pref);
aws_raise_error(AWS_IO_TLS_CIPHER_PREF_UNSUPPORTED);
- goto cleanup_s2n_ctx;
+ goto cleanup_s2n_config;
}
- if (options->certificate.len && options->private_key.len) {
+ if (aws_tls_options_buf_is_set(&options->certificate) && aws_tls_options_buf_is_set(&options->private_key)) {
AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "ctx: Certificate and key have been set, setting them up now.");
if (!aws_text_is_utf8(options->certificate.buffer, options->certificate.len)) {
AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: failed to import certificate, must be ASCII/UTF-8 encoded");
aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE);
- goto cleanup_s2n_ctx;
+ goto cleanup_s2n_config;
}
if (!aws_text_is_utf8(options->private_key.buffer, options->private_key.len)) {
AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: failed to import private key, must be ASCII/UTF-8 encoded");
aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE);
- goto cleanup_s2n_ctx;
+ goto cleanup_s2n_config;
}
+ /* Ensure that what we pass to s2n is zero-terminated */
+ struct aws_string *certificate_string = aws_string_new_from_buf(alloc, &options->certificate);
+ struct aws_string *private_key_string = aws_string_new_from_buf(alloc, &options->private_key);
+
int err_code = s2n_config_add_cert_chain_and_key(
- s2n_ctx->s2n_config, (const char *)options->certificate.buffer, (const char *)options->private_key.buffer);
+ s2n_ctx->s2n_config, (const char *)certificate_string->bytes, (const char *)private_key_string->bytes);
+
+ aws_string_destroy(certificate_string);
+ aws_string_destroy_secure(private_key_string);
if (mode == S2N_CLIENT) {
s2n_config_set_client_auth_type(s2n_ctx->s2n_config, S2N_CERT_AUTH_REQUIRED);
}
if (err_code != S2N_ERR_T_OK) {
- AWS_LOGF_ERROR(
- AWS_LS_IO_TLS,
- "ctx: configuration error %s (%s)",
- s2n_strerror(s2n_errno, "EN"),
- s2n_strerror_debug(s2n_errno, "EN"));
- aws_raise_error(AWS_IO_TLS_CTX_ERROR);
+ s_log_and_raise_s2n_errno("ctx: Failed to add certificate and private key");
+ goto cleanup_s2n_config;
+ }
+ } else if (options->pkcs11.lib != NULL) {
+ AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "ctx: PKCS#11 has been set, setting it up now.");
+ if (s_tls_ctx_pkcs11_setup(s2n_ctx, options)) {
+ goto cleanup_s2n_config;
+ }
+
+ /* set callback so that we can do private key operations through PKCS#11 */
+ if (s2n_config_set_async_pkey_callback(s2n_ctx->s2n_config, s_s2n_pkcs11_async_pkey_callback)) {
+ s_log_and_raise_s2n_errno("ctx: failed to set private key callback");
+ goto cleanup_s2n_config;
+ }
+
+ /* set certificate.
+ * we need to create a custom s2n_cert_chain_and_key that knows the cert but not the key */
+ s2n_ctx->custom_cert_chain_and_key = s2n_cert_chain_and_key_new();
+ if (!s2n_ctx->custom_cert_chain_and_key) {
+ s_log_and_raise_s2n_errno("ctx: creation failed");
+ goto cleanup_s2n_config;
+ }
+
+ if (s2n_cert_chain_and_key_load_public_pem_bytes(
+ s2n_ctx->custom_cert_chain_and_key, options->certificate.buffer, options->certificate.len)) {
+ s_log_and_raise_s2n_errno("ctx: failed to load certificate");
+ goto cleanup_s2n_config;
+ }
+
+ if (s2n_config_add_cert_chain_and_key_to_store(s2n_ctx->s2n_config, s2n_ctx->custom_cert_chain_and_key)) {
+ s_log_and_raise_s2n_errno("ctx: failed to add certificate to store");
goto cleanup_s2n_config;
}
+
+ if (mode == S2N_CLIENT) {
+ s2n_config_set_client_auth_type(s2n_ctx->s2n_config, S2N_CERT_AUTH_REQUIRED);
+ }
}
if (options->verify_peer) {
if (s2n_config_set_check_stapled_ocsp_response(s2n_ctx->s2n_config, 1) == S2N_SUCCESS) {
if (s2n_config_set_status_request_type(s2n_ctx->s2n_config, S2N_STATUS_REQUEST_OCSP) != S2N_SUCCESS) {
- AWS_LOGF_ERROR(
- AWS_LS_IO_TLS,
- "ctx: ocsp status request cannot be set: %s (%s)",
- s2n_strerror(s2n_errno, "EN"),
- s2n_strerror_debug(s2n_errno, "EN"));
- aws_raise_error(AWS_IO_TLS_CTX_ERROR);
+ s_log_and_raise_s2n_errno("ctx: ocsp status request cannot be set");
goto cleanup_s2n_config;
}
} else {
if (s2n_error_get_type(s2n_errno) == S2N_ERR_T_USAGE) {
AWS_LOGF_INFO(AWS_LS_IO_TLS, "ctx: cannot enable ocsp stapling: %s", s2n_strerror(s2n_errno, "EN"));
} else {
- AWS_LOGF_ERROR(
- AWS_LS_IO_TLS,
- "ctx: cannot enable ocsp stapling: %s (%s)",
- s2n_strerror(s2n_errno, "EN"),
- s2n_strerror_debug(s2n_errno, "EN"));
- aws_raise_error(AWS_IO_TLS_CTX_ERROR);
+ s_log_and_raise_s2n_errno("ctx: cannot enable ocsp stapling");
goto cleanup_s2n_config;
}
}
- if (options->ca_path) {
- if (s2n_config_set_verification_ca_location(
- s2n_ctx->s2n_config, NULL, aws_string_c_str(options->ca_path))) {
- AWS_LOGF_ERROR(
- AWS_LS_IO_TLS,
- "ctx: configuration error %s (%s)",
- s2n_strerror(s2n_errno, "EN"),
- s2n_strerror_debug(s2n_errno, "EN"));
- AWS_LOGF_ERROR(AWS_LS_IO_TLS, "Failed to set ca_path %s\n", aws_string_c_str(options->ca_path));
- aws_raise_error(AWS_IO_TLS_CTX_ERROR);
+ if (options->ca_path || aws_tls_options_buf_is_set(&options->ca_file)) {
+ /* The user called an override_default_trust_store() function.
+ * Begin by wiping anything that s2n loaded by default */
+ if (s2n_config_wipe_trust_store(s2n_ctx->s2n_config)) {
+ s_log_and_raise_s2n_errno("ctx: failed to wipe default trust store");
goto cleanup_s2n_config;
}
- }
- if (options->ca_file.len) {
- if (s2n_config_add_pem_to_trust_store(s2n_ctx->s2n_config, (const char *)options->ca_file.buffer)) {
- AWS_LOGF_ERROR(
- AWS_LS_IO_TLS,
- "ctx: configuration error %s (%s)",
- s2n_strerror(s2n_errno, "EN"),
- s2n_strerror_debug(s2n_errno, "EN"));
- AWS_LOGF_ERROR(AWS_LS_IO_TLS, "Failed to set ca_file %s\n", (const char *)options->ca_file.buffer);
- aws_raise_error(AWS_IO_TLS_CTX_ERROR);
- goto cleanup_s2n_config;
+ if (options->ca_path) {
+ if (s2n_config_set_verification_ca_location(
+ s2n_ctx->s2n_config, NULL, aws_string_c_str(options->ca_path))) {
+ s_log_and_raise_s2n_errno("ctx: configuration error");
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "Failed to set ca_path %s\n", aws_string_c_str(options->ca_path));
+ goto cleanup_s2n_config;
+ }
}
- }
- if (!options->ca_path && !options->ca_file.len) {
+ if (aws_tls_options_buf_is_set(&options->ca_file)) {
+ /* Ensure that what we pass to s2n is zero-terminated */
+ struct aws_string *ca_file_string = aws_string_new_from_buf(alloc, &options->ca_file);
+ int set_ca_result =
+ s2n_config_add_pem_to_trust_store(s2n_ctx->s2n_config, (const char *)ca_file_string->bytes);
+ aws_string_destroy(ca_file_string);
+
+ if (set_ca_result) {
+ s_log_and_raise_s2n_errno("ctx: configuration error");
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "Failed to set ca_file %s\n", (const char *)options->ca_file.buffer);
+ goto cleanup_s2n_config;
+ }
+ }
+ } else {
+ /* User wants to use the system's default trust store.
+ *
+ * Note that s2n's trust store always starts with libcrypto's default locations.
+ * These paths are configured when libcrypto is built (--openssldir),
+ * but might not be right for the current machine (e.g. if libcrypto
+ * is statically linked into an application that is distributed
+ * to multiple flavors of Linux). Therefore, load the locations that
+ * were found at library startup. */
if (s2n_config_set_verification_ca_location(s2n_ctx->s2n_config, s_default_ca_file, s_default_ca_dir)) {
- AWS_LOGF_ERROR(
- AWS_LS_IO_TLS,
- "ctx: configuration error %s (%s)",
- s2n_strerror(s2n_errno, "EN"),
- s2n_strerror_debug(s2n_errno, "EN"));
+ s_log_and_raise_s2n_errno("ctx: configuration error");
AWS_LOGF_ERROR(
AWS_LS_IO_TLS, "Failed to set ca_path: %s and ca_file %s\n", s_default_ca_dir, s_default_ca_file);
- aws_raise_error(AWS_IO_TLS_CTX_ERROR);
goto cleanup_s2n_config;
}
}
if (mode == S2N_SERVER && s2n_config_set_client_auth_type(s2n_ctx->s2n_config, S2N_CERT_AUTH_REQUIRED)) {
- AWS_LOGF_ERROR(
- AWS_LS_IO_TLS,
- "ctx: configuration error %s (%s)",
- s2n_strerror(s2n_errno, "EN"),
- s2n_strerror_debug(s2n_errno, "EN"));
- aws_raise_error(AWS_IO_TLS_CTX_ERROR);
+ s_log_and_raise_s2n_errno("ctx: failed to set client auth type");
goto cleanup_s2n_config;
}
} else if (mode != S2N_SERVER) {
@@ -1034,7 +1534,7 @@ static struct aws_tls_ctx *s_tls_ctx_new(
"ctx: X.509 validation has been disabled. "
"If this is not running in a test environment, this is likely a security vulnerability.");
if (s2n_config_disable_x509_verification(s2n_ctx->s2n_config)) {
- aws_raise_error(AWS_IO_TLS_CTX_ERROR);
+ s_log_and_raise_s2n_errno("ctx: failed to disable x509 verification");
goto cleanup_s2n_config;
}
}
@@ -1045,7 +1545,7 @@ static struct aws_tls_ctx *s_tls_ctx_new(
AWS_ZERO_ARRAY(protocols_cpy);
size_t protocols_size = 4;
if (s_parse_protocol_preferences(options->alpn_list, protocols_cpy, &protocols_size)) {
- aws_raise_error(AWS_IO_TLS_CTX_ERROR);
+ s_log_and_raise_s2n_errno("ctx: Failed to parse ALPN list");
goto cleanup_s2n_config;
}
@@ -1056,7 +1556,7 @@ static struct aws_tls_ctx *s_tls_ctx_new(
}
if (s2n_config_set_protocol_preferences(s2n_ctx->s2n_config, protocols, (int)protocols_size)) {
- aws_raise_error(AWS_IO_TLS_CTX_ERROR);
+ s_log_and_raise_s2n_errno("ctx: Failed to set protocol preferences");
goto cleanup_s2n_config;
}
}
@@ -1074,10 +1574,7 @@ static struct aws_tls_ctx *s_tls_ctx_new(
return &s2n_ctx->ctx;
cleanup_s2n_config:
- s2n_config_free(s2n_ctx->s2n_config);
-
-cleanup_s2n_ctx:
- aws_mem_release(alloc, s2n_ctx);
+ s_s2n_ctx_destroy(s2n_ctx);
return NULL;
}
diff --git a/contrib/restricted/aws/aws-c-io/source/socket_channel_handler.c b/contrib/restricted/aws/aws-c-io/source/socket_channel_handler.c
index 40a178123b..6e78b9d6b5 100644
--- a/contrib/restricted/aws/aws-c-io/source/socket_channel_handler.c
+++ b/contrib/restricted/aws/aws-c-io/source/socket_channel_handler.c
@@ -348,13 +348,19 @@ static void s_reset_statistics(struct aws_channel_handler *handler) {
aws_crt_statistics_socket_reset(&socket_handler->stats);
}
-void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats_list) {
+static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats_list) {
struct socket_handler *socket_handler = (struct socket_handler *)handler->impl;
void *stats_base = &socket_handler->stats;
aws_array_list_push_back(stats_list, &stats_base);
}
+static void s_trigger_read(struct aws_channel_handler *handler) {
+ struct socket_handler *socket_handler = (struct socket_handler *)handler->impl;
+
+ s_do_read(socket_handler);
+}
+
static struct aws_channel_handler_vtable s_vtable = {
.process_read_message = s_socket_process_read_message,
.destroy = s_socket_destroy,
@@ -365,6 +371,7 @@ static struct aws_channel_handler_vtable s_vtable = {
.message_overhead = s_message_overhead,
.reset_statistics = s_reset_statistics,
.gather_statistics = s_gather_statistics,
+ .trigger_read = s_trigger_read,
};
struct aws_channel_handler *aws_socket_handler_new(
diff --git a/contrib/restricted/aws/aws-c-io/source/standard_retry_strategy.c b/contrib/restricted/aws/aws-c-io/source/standard_retry_strategy.c
new file mode 100644
index 0000000000..f19493e17e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-io/source/standard_retry_strategy.c
@@ -0,0 +1,497 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/io/logging.h>
+#include <aws/io/retry_strategy.h>
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/mutex.h>
+#include <aws/common/string.h>
+
+#include <inttypes.h>
+
+AWS_STRING_FROM_LITERAL(s_empty_string, "");
+static struct aws_byte_cursor s_empty_string_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("");
+static const size_t s_initial_retry_bucket_capacity = 500u;
+static const size_t s_standard_retry_cost = 5u;
+static const size_t s_standard_transient_cost = 10u;
+static const size_t s_standard_no_retry_cost = 1u;
+
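
A back-of-envelope reading of these constants, assuming the standard token-bucket retry semantics they mirror (each retry debits the bucket, each success credits it back slightly):

    /* Illustrative exhaustion math, not part of the source:
     * 500 / 5  == 100 consecutive ordinary retries from a fresh bucket
     * 500 / 10 ==  50 consecutive transient-error retries
     * each successful call pays back s_standard_no_retry_cost (1) */
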
+struct retry_bucket {
+ struct aws_allocator *allocator;
+ struct aws_retry_strategy *owner;
+ struct aws_string *partition_id;
+ struct aws_byte_cursor partition_id_cur;
+ struct {
+ size_t current_capacity;
+ struct aws_mutex partition_lock;
+ } synced_data;
+};
+
+struct retry_bucket_token {
+ struct aws_retry_token retry_token;
+ struct retry_bucket *strategy_bucket;
+ struct aws_retry_token *exp_backoff_token;
+ aws_retry_strategy_on_retry_token_acquired_fn *original_on_acquired;
+ aws_retry_strategy_on_retry_ready_fn *original_on_ready;
+ size_t last_retry_cost;
+ void *original_user_data;
+};
+
+static bool s_partition_id_equals_byte_cur(const void *seated_cur, const void *cur_ptr) {
+ return aws_byte_cursor_eq_ignore_case(seated_cur, cur_ptr);
+}
+
+static uint64_t s_hash_partition_id(const void *seated_partition_ptr) {
+ return aws_hash_byte_cursor_ptr_ignore_case(seated_partition_ptr);
+}
+
+static void s_destroy_standard_retry_bucket(void *retry_bucket) {
+ struct retry_bucket *standard_retry_bucket = retry_bucket;
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: destroying bucket partition " PRInSTR,
+ (void *)standard_retry_bucket->owner,
+ AWS_BYTE_CURSOR_PRI(standard_retry_bucket->partition_id_cur));
+ aws_string_destroy(standard_retry_bucket->partition_id);
+ aws_mutex_clean_up(&standard_retry_bucket->synced_data.partition_lock);
+ aws_mem_release(standard_retry_bucket->allocator, standard_retry_bucket);
+}
+
+struct standard_strategy {
+ struct aws_retry_strategy base;
+ struct aws_retry_strategy *exponential_backoff_retry_strategy;
+ size_t max_capacity;
+ struct {
+ struct aws_hash_table token_buckets;
+ struct aws_mutex lock;
+ } synced_data;
+};
+
+static void s_standard_retry_destroy(struct aws_retry_strategy *retry_strategy) {
+ AWS_LOGF_TRACE(AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: destroying self", (void *)retry_strategy);
+ struct standard_strategy *standard_strategy = retry_strategy->impl;
+ aws_retry_strategy_release(standard_strategy->exponential_backoff_retry_strategy);
+ aws_hash_table_clean_up(&standard_strategy->synced_data.token_buckets);
+ aws_mutex_clean_up(&standard_strategy->synced_data.lock);
+ aws_mem_release(retry_strategy->allocator, standard_strategy);
+}
+
+static void s_on_standard_retry_token_acquired(
+ struct aws_retry_strategy *retry_strategy,
+ int error_code,
+ struct aws_retry_token *token,
+ void *user_data) {
+ (void)retry_strategy;
+ (void)token;
+
+ struct retry_bucket_token *retry_token = user_data;
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: token acquired callback invoked with error %s with token %p and nested token %p",
+ (void *)retry_token->retry_token.retry_strategy,
+ aws_error_str(error_code),
+ (void *)&retry_token->retry_token,
+ (void *)token);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: invoking on_retry_token_acquired callback",
+ (void *)retry_token->retry_token.retry_strategy);
+
+ aws_retry_token_acquire(&retry_token->retry_token);
+ if (!error_code) {
+ retry_token->exp_backoff_token = token;
+
+ retry_token->original_on_acquired(
+ retry_token->strategy_bucket->owner,
+ error_code,
+ &retry_token->retry_token,
+ retry_token->original_user_data);
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: on_retry_token_acquired callback completed",
+ (void *)retry_token->retry_token.retry_strategy);
+
+ } else {
+ retry_token->original_on_acquired(
+ retry_token->strategy_bucket->owner, error_code, NULL, retry_token->original_user_data);
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: on_retry_token_acquired callback completed",
+ (void *)retry_token->retry_token.retry_strategy);
+ }
+ aws_retry_token_release(&retry_token->retry_token);
+}
+
+static int s_standard_retry_acquire_token(
+ struct aws_retry_strategy *retry_strategy,
+ const struct aws_byte_cursor *partition_id,
+ aws_retry_strategy_on_retry_token_acquired_fn *on_acquired,
+ void *user_data,
+ uint64_t timeout_ms) {
+ struct standard_strategy *standard_strategy = retry_strategy->impl;
+ bool bucket_needs_cleanup = false;
+
+ const struct aws_byte_cursor *partition_id_ptr =
+ !partition_id || partition_id->len == 0 ? &s_empty_string_cur : partition_id;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: attempting to acquire retry token for partition_id " PRInSTR,
+ (void *)retry_strategy,
+ AWS_BYTE_CURSOR_PRI(*partition_id_ptr));
+
+ struct retry_bucket_token *token = aws_mem_calloc(retry_strategy->allocator, 1, sizeof(struct retry_bucket_token));
+ if (!token) {
+ return AWS_OP_ERR;
+ }
+
+ token->original_user_data = user_data;
+ token->original_on_acquired = on_acquired;
+
+ struct aws_hash_element *element_ptr;
+ struct retry_bucket *bucket_ptr;
+ AWS_FATAL_ASSERT(!aws_mutex_lock(&standard_strategy->synced_data.lock) && "Lock acquisition failed.");
+ aws_hash_table_find(&standard_strategy->synced_data.token_buckets, partition_id_ptr, &element_ptr);
+ if (!element_ptr) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: bucket for partition_id " PRInSTR " does not exist, attempting to create one",
+ (void *)retry_strategy,
+ AWS_BYTE_CURSOR_PRI(*partition_id_ptr));
+ bucket_ptr = aws_mem_calloc(standard_strategy->base.allocator, 1, sizeof(struct retry_bucket));
+
+ if (!bucket_ptr) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: error when allocating bucket %s",
+ (void *)retry_strategy,
+ aws_error_debug_str(aws_last_error()));
+ goto table_locked;
+ }
+
+ bucket_needs_cleanup = true;
+ bucket_ptr->allocator = standard_strategy->base.allocator;
+ bucket_ptr->partition_id = partition_id_ptr->len > 0
+ ? aws_string_new_from_cursor(standard_strategy->base.allocator, partition_id)
+ : (struct aws_string *)s_empty_string;
+
+ if (!bucket_ptr->partition_id) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: error when allocating partition_id %s",
+ (void *)retry_strategy,
+ aws_error_debug_str(aws_last_error()));
+ goto table_locked;
+ }
+
+ bucket_ptr->partition_id_cur = aws_byte_cursor_from_string(bucket_ptr->partition_id);
+ AWS_FATAL_ASSERT(!aws_mutex_init(&bucket_ptr->synced_data.partition_lock) && "mutex init failed!");
+ bucket_ptr->owner = retry_strategy;
+ bucket_ptr->synced_data.current_capacity = standard_strategy->max_capacity;
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: bucket %p for partition_id " PRInSTR " created",
+ (void *)retry_strategy,
+ (void *)bucket_ptr,
+ AWS_BYTE_CURSOR_PRI(*partition_id_ptr));
+
+ if (aws_hash_table_put(
+ &standard_strategy->synced_data.token_buckets, &bucket_ptr->partition_id_cur, bucket_ptr, NULL)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: error when putting bucket to token_bucket table %s",
+ (void *)retry_strategy,
+ aws_error_debug_str(aws_last_error()));
+ goto table_locked;
+ }
+ bucket_needs_cleanup = false;
+ } else {
+ bucket_ptr = element_ptr->value;
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: bucket %p for partition_id " PRInSTR " found",
+ (void *)retry_strategy,
+ (void *)bucket_ptr,
+ AWS_BYTE_CURSOR_PRI(*partition_id_ptr));
+ }
+ AWS_FATAL_ASSERT(!aws_mutex_unlock(&standard_strategy->synced_data.lock) && "Mutex unlock failed");
+
+ token->strategy_bucket = bucket_ptr;
+ token->retry_token.retry_strategy = retry_strategy;
+ aws_atomic_init_int(&token->retry_token.ref_count, 1u);
+ aws_retry_strategy_acquire(retry_strategy);
+ token->retry_token.allocator = retry_strategy->allocator;
+ token->retry_token.impl = token;
+
+ /* don't decrement the capacity counter here; just set the payback amount, so that calls which succeed
+ * gradually restore the bucket's capacity. Otherwise, we'd never recover from an outage. */
+ token->last_retry_cost = s_standard_no_retry_cost;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: allocated token %p for partition_id " PRInSTR,
+ (void *)retry_strategy,
+ (void *)&token->retry_token,
+ AWS_BYTE_CURSOR_PRI(*partition_id_ptr));
+
+ if (aws_retry_strategy_acquire_retry_token(
+ standard_strategy->exponential_backoff_retry_strategy,
+ partition_id_ptr,
+ s_on_standard_retry_token_acquired,
+ token,
+ timeout_ms)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: error when acquiring retry token from backing retry strategy %p: %s",
+ (void *)retry_strategy,
+ (void *)standard_strategy->exponential_backoff_retry_strategy,
+ aws_error_debug_str(aws_last_error()));
+ goto table_updated;
+ }
+
+ return AWS_OP_SUCCESS;
+
+table_updated:
+ AWS_FATAL_ASSERT(!aws_mutex_lock(&standard_strategy->synced_data.lock) && "Mutex lock failed");
+ aws_hash_table_remove(&standard_strategy->synced_data.token_buckets, &bucket_ptr->partition_id_cur, NULL, NULL);
+ bucket_needs_cleanup = false;
+
+table_locked:
+ AWS_FATAL_ASSERT(!aws_mutex_unlock(&standard_strategy->synced_data.lock) && "Mutex unlock failed");
+
+ if (bucket_needs_cleanup) {
+ s_destroy_standard_retry_bucket(bucket_ptr);
+ }
+
+ aws_retry_token_release(&token->retry_token);
+
+ return AWS_OP_ERR;
+}
+
+static void s_standard_retry_strategy_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) {
+ (void)token;
+
+ struct aws_retry_token *standard_retry_token = user_data;
+ struct retry_bucket_token *impl = standard_retry_token->impl;
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: invoking on_retry_ready callback with error %s, token %p, and nested token %p",
+ (void *)token->retry_strategy,
+ aws_error_str(error_code),
+ (void *)standard_retry_token,
+ (void *)token);
+ struct aws_retry_strategy *retry_strategy = token->retry_strategy;
+ /* we already hold a reference count here due to the previous acquire before scheduling, so don't worry
+ * about incrementing standard_retry_token here */
+ impl->original_on_ready(standard_retry_token, error_code, impl->original_user_data);
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: on_retry_ready callback completed", (void *)retry_strategy);
+ /* release the acquire we did before scheduling the retry */
+ aws_retry_token_release(standard_retry_token);
+}
+
+static int s_standard_retry_strategy_schedule_retry(
+ struct aws_retry_token *token,
+ enum aws_retry_error_type error_type,
+ aws_retry_strategy_on_retry_ready_fn *retry_ready,
+ void *user_data) {
+
+ if (error_type == AWS_RETRY_ERROR_TYPE_CLIENT_ERROR) {
+ return aws_raise_error(AWS_IO_RETRY_PERMISSION_DENIED);
+ }
+
+ struct retry_bucket_token *impl = token->impl;
+
+ size_t capacity_consumed = 0;
+
+ AWS_FATAL_ASSERT(!aws_mutex_lock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex lock failed");
+ size_t current_capacity = impl->strategy_bucket->synced_data.current_capacity;
+ if (current_capacity == 0) {
+ AWS_FATAL_ASSERT(
+ !aws_mutex_unlock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex unlock failed");
+ AWS_LOGF_INFO(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "token_id=%p: requested to schedule retry but the bucket capacity is empty. Rejecting retry request.",
+ (void *)token);
+ return aws_raise_error(AWS_IO_RETRY_PERMISSION_DENIED);
+ }
+
+ if (error_type == AWS_RETRY_ERROR_TYPE_TRANSIENT) {
+ capacity_consumed = aws_min_size(current_capacity, s_standard_transient_cost);
+ } else {
+ /* this branch also covers throttling: a throttling service told us to slow down but is otherwise
+ * healthy, so pay a smaller penalty for those errors. */
+ capacity_consumed = aws_min_size(current_capacity, s_standard_retry_cost);
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "token_id=%p: reducing retry capacity by %zu from %zu and scheduling retry.",
+ (void *)token,
+ capacity_consumed,
+ current_capacity);
+ impl->original_user_data = user_data;
+ impl->original_on_ready = retry_ready;
+
+ size_t previous_cost = impl->last_retry_cost;
+ impl->last_retry_cost = capacity_consumed;
+ impl->strategy_bucket->synced_data.current_capacity -= capacity_consumed;
+ AWS_FATAL_ASSERT(!aws_mutex_unlock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex unlock failed");
+
+ /* acquire before scheduling to prevent clean up before the callback runs. */
+ aws_retry_token_acquire(&impl->retry_token);
+ if (aws_retry_strategy_schedule_retry(
+ impl->exp_backoff_token, error_type, s_standard_retry_strategy_on_retry_ready, token)) {
+ /* release for the above acquire */
+ aws_retry_token_release(&impl->retry_token);
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "token_id=%p: error occurred while scheduling retry: %s.",
+ (void *)token,
+ aws_error_debug_str(aws_last_error()));
+ /* roll it back. */
+ AWS_FATAL_ASSERT(!aws_mutex_lock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex lock failed");
+ impl->last_retry_cost = previous_cost;
+ size_t desired_capacity = impl->strategy_bucket->synced_data.current_capacity + capacity_consumed;
+ struct standard_strategy *strategy_impl = token->retry_strategy->impl;
+ impl->strategy_bucket->synced_data.current_capacity =
+ desired_capacity < strategy_impl->max_capacity ? desired_capacity : strategy_impl->max_capacity;
+ AWS_FATAL_ASSERT(
+ !aws_mutex_unlock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex unlock failed");
+
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_standard_retry_strategy_record_success(struct aws_retry_token *token) {
+ struct retry_bucket_token *impl = token->impl;
+
+ AWS_FATAL_ASSERT(!aws_mutex_lock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex lock failed");
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "token_id=%p: partition=" PRInSTR
+ ": recording successful operation and adding %zu units of capacity back to the bucket.",
+ (void *)token,
+ AWS_BYTE_CURSOR_PRI(impl->strategy_bucket->partition_id_cur),
+ impl->last_retry_cost);
+ size_t capacity_payback = impl->strategy_bucket->synced_data.current_capacity + impl->last_retry_cost;
+ struct standard_strategy *standard_strategy = token->retry_strategy->impl;
+ impl->strategy_bucket->synced_data.current_capacity =
+ capacity_payback < standard_strategy->max_capacity ? capacity_payback : standard_strategy->max_capacity;
+ impl->last_retry_cost = 0;
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "bucket_id=%p: partition=" PRInSTR " : new capacity is %zu.",
+ (void *)token,
+ AWS_BYTE_CURSOR_PRI(impl->strategy_bucket->partition_id_cur),
+ impl->strategy_bucket->synced_data.current_capacity);
+ AWS_FATAL_ASSERT(!aws_mutex_unlock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex unlock failed");
+ return AWS_OP_SUCCESS;
+}
+
+static void s_standard_retry_strategy_release_token(struct aws_retry_token *token) {
+ if (token) {
+ AWS_LOGF_TRACE(AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: releasing token", (void *)token);
+ struct retry_bucket_token *impl = token->impl;
+ aws_retry_token_release(impl->exp_backoff_token);
+ aws_retry_strategy_release(token->retry_strategy);
+ aws_mem_release(token->allocator, impl);
+ }
+}
+
+static struct aws_retry_strategy_vtable s_standard_retry_vtable = {
+ .schedule_retry = s_standard_retry_strategy_schedule_retry,
+ .acquire_token = s_standard_retry_acquire_token,
+ .release_token = s_standard_retry_strategy_release_token,
+ .destroy = s_standard_retry_destroy,
+ .record_success = s_standard_retry_strategy_record_success,
+};
+
+struct aws_retry_strategy *aws_retry_strategy_new_standard(
+ struct aws_allocator *allocator,
+ const struct aws_standard_retry_options *config) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(config);
+
+ AWS_LOGF_INFO(AWS_LS_IO_STANDARD_RETRY_STRATEGY, "static: creating new standard retry strategy");
+ struct standard_strategy *standard_strategy = aws_mem_calloc(allocator, 1, sizeof(struct standard_strategy));
+
+ if (!standard_strategy) {
+ AWS_LOGF_ERROR(AWS_LS_IO_STANDARD_RETRY_STRATEGY, "static: allocation of new standard retry strategy failed");
+ return NULL;
+ }
+
+ aws_atomic_init_int(&standard_strategy->base.ref_count, 1);
+
+ struct aws_exponential_backoff_retry_options config_cpy = config->backoff_retry_options;
+
+ /* standard default is 3. */
+ if (!config->backoff_retry_options.max_retries) {
+ config_cpy.max_retries = 3;
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: creating backing exponential backoff strategy with max_retries of %zu",
+ (void *)&standard_strategy->base,
+ config_cpy.max_retries);
+
+ standard_strategy->exponential_backoff_retry_strategy =
+ aws_retry_strategy_new_exponential_backoff(allocator, &config_cpy);
+
+ if (!standard_strategy->exponential_backoff_retry_strategy) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: allocation of new exponential backoff retry strategy failed: %s",
+ (void *)&standard_strategy->base,
+ aws_error_debug_str(aws_last_error()));
+ goto error;
+ }
+
+ if (aws_hash_table_init(
+ &standard_strategy->synced_data.token_buckets,
+ allocator,
+ 16u,
+ s_hash_partition_id,
+ s_partition_id_equals_byte_cur,
+ NULL,
+ s_destroy_standard_retry_bucket)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: token bucket table creation failed: %s",
+ (void *)&standard_strategy->base,
+ aws_error_debug_str(aws_last_error()));
+ goto error;
+ }
+
+ standard_strategy->max_capacity =
+ config->initial_bucket_capacity ? config->initial_bucket_capacity : s_initial_retry_bucket_capacity;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_STANDARD_RETRY_STRATEGY,
+ "id=%p: maximum bucket capacity set to %zu",
+ (void *)&standard_strategy->base,
+ standard_strategy->max_capacity);
+ AWS_FATAL_ASSERT(!aws_mutex_init(&standard_strategy->synced_data.lock) && "mutex init failed");
+
+ standard_strategy->base.allocator = allocator;
+ standard_strategy->base.vtable = &s_standard_retry_vtable;
+ standard_strategy->base.impl = standard_strategy;
+ return &standard_strategy->base;
+
+error:
+ if (standard_strategy->exponential_backoff_retry_strategy) {
+ aws_retry_strategy_release(standard_strategy->exponential_backoff_retry_strategy);
+ }
+
+ aws_mem_release(allocator, standard_strategy);
+
+ return NULL;
+}
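Taken together, the new file implements the SDK's "standard" retry mode: a per-partition token bucket layered over the exponential backoff strategy. A minimal usage sketch against the public retry_strategy.h API follows; the event loop group and the callback body are illustrative assumptions, not part of this diff.

#include <aws/io/retry_strategy.h>

/* Sketch: create the strategy and drive one token through its lifecycle.
 * `el_group` is assumed to be an initialized aws_event_loop_group. */
static void s_on_token_acquired(
    struct aws_retry_strategy *strategy,
    int error_code,
    struct aws_retry_token *token,
    void *user_data) {
    (void)strategy;
    (void)user_data;
    if (error_code) {
        return; /* token is NULL when acquisition failed */
    }
    /* ... make the request; if it succeeds, refund the bucket: */
    aws_retry_token_record_success(token);
    aws_retry_token_release(token);
}

static struct aws_retry_strategy *s_make_standard_strategy(
    struct aws_allocator *allocator,
    struct aws_event_loop_group *el_group) {
    struct aws_standard_retry_options options = {
        .backoff_retry_options = {.el_group = el_group}, /* max_retries of 0 becomes 3 */
        .initial_bucket_capacity = 0,                    /* 0 becomes 500 */
    };
    struct aws_retry_strategy *strategy = aws_retry_strategy_new_standard(allocator, &options);
    if (strategy) {
        /* a NULL/empty partition_id maps to the shared "" bucket */
        aws_retry_strategy_acquire_retry_token(strategy, NULL, s_on_token_acquired, NULL, 0);
    }
    return strategy;
}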
diff --git a/contrib/restricted/aws/aws-c-io/source/stream.c b/contrib/restricted/aws/aws-c-io/source/stream.c
index 69c73ab243..4e49844e96 100644
--- a/contrib/restricted/aws/aws-c-io/source/stream.c
+++ b/contrib/restricted/aws/aws-c-io/source/stream.c
@@ -5,15 +5,12 @@
#include <aws/io/stream.h>
+#include <aws/common/file.h>
#include <aws/io/file_utils.h>
#include <errno.h>
-#if _MSC_VER
-# pragma warning(disable : 4996) /* fopen */
-#endif
-
-int aws_input_stream_seek(struct aws_input_stream *stream, aws_off_t offset, enum aws_stream_seek_basis basis) {
+int aws_input_stream_seek(struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis) {
AWS_ASSERT(stream && stream->vtable && stream->vtable->seek);
return stream->vtable->seek(stream, offset, basis);
@@ -62,75 +59,67 @@ int aws_input_stream_get_length(struct aws_input_stream *stream, int64_t *out_le
return stream->vtable->get_length(stream, out_length);
}
-void aws_input_stream_destroy(struct aws_input_stream *stream) {
- if (stream != NULL) {
- AWS_ASSERT(stream->vtable && stream->vtable->destroy);
-
- stream->vtable->destroy(stream);
- }
-}
-
/*
* cursor stream implementation
*/
struct aws_input_stream_byte_cursor_impl {
+ struct aws_input_stream base;
+ struct aws_allocator *allocator;
struct aws_byte_cursor original_cursor;
struct aws_byte_cursor current_cursor;
};
/*
* This is an ugly function that, in the absence of better guidance, is designed to handle all possible combinations of
- * aws_off_t (int32_t, int64_t) x all possible combinations of size_t (uint32_t, uint64_t). Whether the anomalous
- * combination of int64_t vs. uint32_t is even possible on any real platform is unknown. If size_t ever exceeds 64 bits
- * this function will fail badly.
+ * size_t (uint32_t, uint64_t). If size_t ever exceeds 64 bits this function will fail badly.
*
* Safety and invariant assumptions are sprinkled via comments. The overall strategy is to cast up to 64 bits and
* perform all arithmetic there, being careful with signed vs. unsigned to prevent bad operations.
*
- * Assumption #1: aws_off_t resolves to a signed integer 64 bits or smaller
- * Assumption #2: size_t resolves to an unsigned integer 64 bits or smaller
+ * Assumption #1: size_t resolves to an unsigned integer 64 bits or smaller
*/
-AWS_STATIC_ASSERT(sizeof(aws_off_t) <= 8);
AWS_STATIC_ASSERT(sizeof(size_t) <= 8);
static int s_aws_input_stream_byte_cursor_seek(
struct aws_input_stream *stream,
- aws_off_t offset,
+ int64_t offset,
enum aws_stream_seek_basis basis) {
- struct aws_input_stream_byte_cursor_impl *impl = stream->impl;
+ struct aws_input_stream_byte_cursor_impl *impl =
+ AWS_CONTAINER_OF(stream, struct aws_input_stream_byte_cursor_impl, base);
uint64_t final_offset = 0;
- int64_t checked_offset = offset; /* safe by assumption 1 */
switch (basis) {
case AWS_SSB_BEGIN:
/*
- * (uint64_t)checked_offset -- safe by virtue of the earlier is-negative check + Assumption 1
- * (uint64_t)impl->original_cursor.len -- safe via assumption 2
+ * (uint64_t)offset -- safe by virtue of the earlier is-negative check
+ * (uint64_t)impl->original_cursor.len -- safe via assumption 1
*/
- if (checked_offset < 0 || (uint64_t)checked_offset > (uint64_t)impl->original_cursor.len) {
+ if (offset < 0 || (uint64_t)offset > (uint64_t)impl->original_cursor.len) {
return aws_raise_error(AWS_IO_STREAM_INVALID_SEEK_POSITION);
}
/* safe because negative offsets were turned into an error */
- final_offset = (uint64_t)checked_offset;
+ final_offset = (uint64_t)offset;
break;
case AWS_SSB_END:
/*
- * -checked_offset -- safe as long checked_offset is not INT64_MIN which was previously checked
- * (uint64_t)(-checked_offset) -- safe because (-checked_offset) is positive (and < INT64_MAX < UINT64_MAX)
+ * -offset -- safe as long as offset is not INT64_MIN, which was previously checked
+ * (uint64_t)(-offset) -- safe because (-offset) is positive (and < INT64_MAX < UINT64_MAX)
*/
- if (checked_offset > 0 || checked_offset == INT64_MIN ||
- (uint64_t)(-checked_offset) > (uint64_t)impl->original_cursor.len) {
+ if (offset > 0 || offset == INT64_MIN || (uint64_t)(-offset) > (uint64_t)impl->original_cursor.len) {
return aws_raise_error(AWS_IO_STREAM_INVALID_SEEK_POSITION);
}
/* cases that would make this unsafe became errors with previous conditional */
- final_offset = (uint64_t)impl->original_cursor.len - (uint64_t)(-checked_offset);
+ final_offset = (uint64_t)impl->original_cursor.len - (uint64_t)(-offset);
break;
+
+ default:
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
/* true because we already validated against (impl->original_cursor.len) which is <= SIZE_MAX */
@@ -140,11 +129,10 @@ static int s_aws_input_stream_byte_cursor_seek(
size_t final_offset_sz = (size_t)final_offset;
/* sanity */
- AWS_ASSERT(final_offset_sz <= impl->current_cursor.len);
+ AWS_ASSERT(final_offset_sz <= impl->original_cursor.len);
+ /* reset current_cursor to new position */
impl->current_cursor = impl->original_cursor;
-
- /* let's skip advance */
impl->current_cursor.ptr += final_offset_sz;
impl->current_cursor.len -= final_offset_sz;
@@ -152,7 +140,8 @@ static int s_aws_input_stream_byte_cursor_seek(
}
static int s_aws_input_stream_byte_cursor_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) {
- struct aws_input_stream_byte_cursor_impl *impl = stream->impl;
+ struct aws_input_stream_byte_cursor_impl *impl =
+ AWS_CONTAINER_OF(stream, struct aws_input_stream_byte_cursor_impl, base);
size_t actually_read = dest->capacity - dest->len;
if (actually_read > impl->current_cursor.len) {
@@ -171,7 +160,8 @@ static int s_aws_input_stream_byte_cursor_read(struct aws_input_stream *stream,
static int s_aws_input_stream_byte_cursor_get_status(
struct aws_input_stream *stream,
struct aws_stream_status *status) {
- struct aws_input_stream_byte_cursor_impl *impl = stream->impl;
+ struct aws_input_stream_byte_cursor_impl *impl =
+ AWS_CONTAINER_OF(stream, struct aws_input_stream_byte_cursor_impl, base);
status->is_end_of_stream = impl->current_cursor.len == 0;
status->is_valid = true;
@@ -180,7 +170,8 @@ static int s_aws_input_stream_byte_cursor_get_status(
}
static int s_aws_input_stream_byte_cursor_get_length(struct aws_input_stream *stream, int64_t *out_length) {
- struct aws_input_stream_byte_cursor_impl *impl = stream->impl;
+ struct aws_input_stream_byte_cursor_impl *impl =
+ AWS_CONTAINER_OF(stream, struct aws_input_stream_byte_cursor_impl, base);
#if SIZE_MAX > INT64_MAX
size_t length = impl->original_cursor.len;
@@ -194,8 +185,8 @@ static int s_aws_input_stream_byte_cursor_get_length(struct aws_input_stream *st
return AWS_OP_SUCCESS;
}
-static void s_aws_input_stream_byte_cursor_destroy(struct aws_input_stream *stream) {
- aws_mem_release(stream->allocator, stream);
+static void s_aws_input_stream_byte_cursor_destroy(struct aws_input_stream_byte_cursor_impl *impl) {
+ aws_mem_release(impl->allocator, impl);
}
static struct aws_input_stream_vtable s_aws_input_stream_byte_cursor_vtable = {
@@ -203,53 +194,40 @@ static struct aws_input_stream_vtable s_aws_input_stream_byte_cursor_vtable = {
.read = s_aws_input_stream_byte_cursor_read,
.get_status = s_aws_input_stream_byte_cursor_get_status,
.get_length = s_aws_input_stream_byte_cursor_get_length,
- .destroy = s_aws_input_stream_byte_cursor_destroy};
+};
struct aws_input_stream *aws_input_stream_new_from_cursor(
struct aws_allocator *allocator,
const struct aws_byte_cursor *cursor) {
- struct aws_input_stream *input_stream = NULL;
- struct aws_input_stream_byte_cursor_impl *impl = NULL;
-
- aws_mem_acquire_many(
- allocator,
- 2,
- &input_stream,
- sizeof(struct aws_input_stream),
- &impl,
- sizeof(struct aws_input_stream_byte_cursor_impl));
-
- if (!input_stream) {
- return NULL;
- }
-
- AWS_ZERO_STRUCT(*input_stream);
- AWS_ZERO_STRUCT(*impl);
-
- input_stream->allocator = allocator;
- input_stream->vtable = &s_aws_input_stream_byte_cursor_vtable;
- input_stream->impl = impl;
+ struct aws_input_stream_byte_cursor_impl *impl =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_input_stream_byte_cursor_impl));
+ impl->allocator = allocator;
impl->original_cursor = *cursor;
impl->current_cursor = *cursor;
+ impl->base.vtable = &s_aws_input_stream_byte_cursor_vtable;
+ aws_ref_count_init(
+ &impl->base.ref_count, impl, (aws_simple_completion_callback *)s_aws_input_stream_byte_cursor_destroy);
- return input_stream;
+ return &impl->base;
}
/*
* file-based input stream
*/
struct aws_input_stream_file_impl {
+ struct aws_input_stream base;
+ struct aws_allocator *allocator;
FILE *file;
bool close_on_clean_up;
};
static int s_aws_input_stream_file_seek(
struct aws_input_stream *stream,
- aws_off_t offset,
+ int64_t offset,
enum aws_stream_seek_basis basis) {
- struct aws_input_stream_file_impl *impl = stream->impl;
+ struct aws_input_stream_file_impl *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_file_impl, base);
int whence = (basis == AWS_SSB_BEGIN) ? SEEK_SET : SEEK_END;
if (aws_fseek(impl->file, offset, whence)) {
@@ -260,7 +238,7 @@ static int s_aws_input_stream_file_seek(
}
static int s_aws_input_stream_file_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) {
- struct aws_input_stream_file_impl *impl = stream->impl;
+ struct aws_input_stream_file_impl *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_file_impl, base);
size_t max_read = dest->capacity - dest->len;
size_t actually_read = fread(dest->buffer + dest->len, 1, max_read, impl->file);
@@ -276,7 +254,7 @@ static int s_aws_input_stream_file_read(struct aws_input_stream *stream, struct
}
static int s_aws_input_stream_file_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) {
- struct aws_input_stream_file_impl *impl = stream->impl;
+ struct aws_input_stream_file_impl *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_file_impl, base);
status->is_end_of_stream = feof(impl->file) != 0;
status->is_valid = ferror(impl->file) == 0;
@@ -285,19 +263,17 @@ static int s_aws_input_stream_file_get_status(struct aws_input_stream *stream, s
}
static int s_aws_input_stream_file_get_length(struct aws_input_stream *stream, int64_t *length) {
- struct aws_input_stream_file_impl *impl = stream->impl;
+ struct aws_input_stream_file_impl *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_file_impl, base);
return aws_file_get_length(impl->file, length);
}
-static void s_aws_input_stream_file_destroy(struct aws_input_stream *stream) {
- struct aws_input_stream_file_impl *impl = stream->impl;
+static void s_aws_input_stream_file_destroy(struct aws_input_stream_file_impl *impl) {
if (impl->close_on_clean_up && impl->file) {
fclose(impl->file);
}
-
- aws_mem_release(stream->allocator, stream);
+ aws_mem_release(impl->allocator, impl);
}
static struct aws_input_stream_vtable s_aws_input_stream_file_vtable = {
@@ -305,64 +281,64 @@ static struct aws_input_stream_vtable s_aws_input_stream_file_vtable = {
.read = s_aws_input_stream_file_read,
.get_status = s_aws_input_stream_file_get_status,
.get_length = s_aws_input_stream_file_get_length,
- .destroy = s_aws_input_stream_file_destroy};
+};
struct aws_input_stream *aws_input_stream_new_from_file(struct aws_allocator *allocator, const char *file_name) {
- struct aws_input_stream *input_stream = NULL;
- struct aws_input_stream_file_impl *impl = NULL;
-
- aws_mem_acquire_many(
- allocator, 2, &input_stream, sizeof(struct aws_input_stream), &impl, sizeof(struct aws_input_stream_file_impl));
-
- if (!input_stream) {
- return NULL;
- }
-
- AWS_ZERO_STRUCT(*input_stream);
- AWS_ZERO_STRUCT(*impl);
-
- input_stream->allocator = allocator;
- input_stream->vtable = &s_aws_input_stream_file_vtable;
- input_stream->impl = impl;
+ struct aws_input_stream_file_impl *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_input_stream_file_impl));
- impl->file = fopen(file_name, "r");
+ impl->file = aws_fopen(file_name, "r+b");
if (impl->file == NULL) {
aws_translate_and_raise_io_error(errno);
goto on_error;
}
impl->close_on_clean_up = true;
+ impl->allocator = allocator;
+ impl->base.vtable = &s_aws_input_stream_file_vtable;
+ aws_ref_count_init(&impl->base.ref_count, impl, (aws_simple_completion_callback *)s_aws_input_stream_file_destroy);
- return input_stream;
+ return &impl->base;
on_error:
-
- aws_input_stream_destroy(input_stream);
-
+ aws_mem_release(allocator, impl);
return NULL;
}
struct aws_input_stream *aws_input_stream_new_from_open_file(struct aws_allocator *allocator, FILE *file) {
- struct aws_input_stream *input_stream = NULL;
- struct aws_input_stream_file_impl *impl = NULL;
-
- aws_mem_acquire_many(
- allocator, 2, &input_stream, sizeof(struct aws_input_stream), &impl, sizeof(struct aws_input_stream_file_impl));
+ struct aws_input_stream_file_impl *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_input_stream_file_impl));
- if (!input_stream) {
- return NULL;
- }
+ impl->file = file;
+ impl->close_on_clean_up = false;
+ impl->allocator = allocator;
- AWS_ZERO_STRUCT(*input_stream);
- AWS_ZERO_STRUCT(*impl);
+ impl->base.vtable = &s_aws_input_stream_file_vtable;
+ aws_ref_count_init(&impl->base.ref_count, impl, (aws_simple_completion_callback *)s_aws_input_stream_file_destroy);
+ return &impl->base;
+}
- input_stream->allocator = allocator;
- input_stream->vtable = &s_aws_input_stream_file_vtable;
- input_stream->impl = impl;
+struct aws_input_stream *aws_input_stream_acquire(struct aws_input_stream *stream) {
+ if (stream != NULL) {
+ if (stream->vtable->acquire) {
+ stream->vtable->acquire(stream);
+ } else {
+ aws_ref_count_acquire(&stream->ref_count);
+ }
+ }
+ return stream;
+}
- impl->file = file;
- impl->close_on_clean_up = false;
+struct aws_input_stream *aws_input_stream_release(struct aws_input_stream *stream) {
+ if (stream != NULL) {
+ if (stream->vtable->release) {
+ stream->vtable->release(stream);
+ } else {
+ aws_ref_count_release(&stream->ref_count);
+ }
+ }
+ return NULL;
+}
- return input_stream;
+void aws_input_stream_destroy(struct aws_input_stream *stream) {
+ aws_input_stream_release(stream);
}
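The stream rework replaces the separately allocated aws_input_stream + impl pair with an embedded base struct, and replaces vtable-driven destruction with ref-counting: aws_input_stream_acquire/aws_input_stream_release manage lifetime, while aws_input_stream_destroy survives only as a wrapper around release. A minimal sketch of the new lifecycle, using only the public stream API:

#include <aws/common/byte_buf.h>
#include <aws/io/stream.h>

/* Sketch: read a cursor-backed stream, then drop the initial reference. */
static int s_read_cursor_stream(struct aws_allocator *allocator) {
    struct aws_byte_cursor src = aws_byte_cursor_from_c_str("hello");
    struct aws_input_stream *stream = aws_input_stream_new_from_cursor(allocator, &src);

    struct aws_byte_buf dest;
    if (aws_byte_buf_init(&dest, allocator, src.len)) {
        aws_input_stream_release(stream);
        return AWS_OP_ERR;
    }

    int result = aws_input_stream_read(stream, &dest);

    aws_byte_buf_clean_up(&dest);
    aws_input_stream_release(stream); /* ref count hits zero; the impl frees itself */
    return result;
}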
diff --git a/contrib/restricted/aws/aws-c-io/source/tls_channel_handler.c b/contrib/restricted/aws/aws-c-io/source/tls_channel_handler.c
index bf0f3be9f2..65acdf191d 100644
--- a/contrib/restricted/aws/aws-c-io/source/tls_channel_handler.c
+++ b/contrib/restricted/aws/aws-c-io/source/tls_channel_handler.c
@@ -6,6 +6,9 @@
#include <aws/io/channel.h>
#include <aws/io/file_utils.h>
#include <aws/io/logging.h>
+#include <aws/io/pkcs11.h>
+#include <aws/io/private/pem_utils.h>
+#include <aws/io/private/tls_channel_handler_shared.h>
#include <aws/io/tls_channel_handler.h>
#define AWS_DEFAULT_TLS_TIMEOUT_MS 10000
@@ -22,150 +25,255 @@ void aws_tls_ctx_options_init_default_client(struct aws_tls_ctx_options *options
}
void aws_tls_ctx_options_clean_up(struct aws_tls_ctx_options *options) {
- if (options->ca_file.len) {
- aws_byte_buf_clean_up(&options->ca_file);
- }
+ aws_byte_buf_clean_up(&options->ca_file);
+ aws_string_destroy(options->ca_path);
+ aws_byte_buf_clean_up(&options->certificate);
+ aws_byte_buf_clean_up_secure(&options->private_key);
- if (options->ca_path) {
- aws_string_destroy(options->ca_path);
- }
+#ifdef __APPLE__
+ aws_byte_buf_clean_up_secure(&options->pkcs12);
+ aws_byte_buf_clean_up_secure(&options->pkcs12_password);
- if (options->certificate.len) {
- aws_byte_buf_clean_up(&options->certificate);
- }
+# if !defined(AWS_OS_IOS)
+ aws_string_destroy(options->keychain_path);
+# endif
+#endif
+
+ aws_string_destroy(options->alpn_list);
+
+ aws_pkcs11_lib_release(options->pkcs11.lib);
+ aws_string_destroy_secure(options->pkcs11.user_pin);
+ aws_string_destroy(options->pkcs11.token_label);
+ aws_string_destroy(options->pkcs11.private_key_object_label);
+
+ AWS_ZERO_STRUCT(*options);
+}
+
+int aws_tls_ctx_options_init_client_mtls(
+ struct aws_tls_ctx_options *options,
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *cert,
+ const struct aws_byte_cursor *pkey) {
+
+#if !defined(AWS_OS_IOS)
- if (options->private_key.len) {
- aws_byte_buf_clean_up_secure(&options->private_key);
+ aws_tls_ctx_options_init_default_client(options, allocator);
+
+ if (aws_byte_buf_init_copy_from_cursor(&options->certificate, allocator, *cert)) {
+ goto error;
}
-#ifdef __APPLE__
- if (options->pkcs12.len) {
- aws_byte_buf_clean_up_secure(&options->pkcs12);
+ if (aws_sanitize_pem(&options->certificate, allocator)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid certificate. File must contain PEM encoded data");
+ goto error;
}
- if (options->pkcs12_password.len) {
- aws_byte_buf_clean_up_secure(&options->pkcs12_password);
+ if (aws_byte_buf_init_copy_from_cursor(&options->private_key, allocator, *pkey)) {
+ goto error;
}
-#endif
- if (options->alpn_list) {
- aws_string_destroy(options->alpn_list);
+ if (aws_sanitize_pem(&options->private_key, allocator)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid private key. File must contain PEM encoded data");
+ goto error;
}
+ return AWS_OP_SUCCESS;
+error:
+ aws_tls_ctx_options_clean_up(options);
+ return AWS_OP_ERR;
+
+#else
+ (void)allocator;
+ (void)cert;
+ (void)pkey;
AWS_ZERO_STRUCT(*options);
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: This platform does not support PEM certificates");
+ return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED);
+#endif
}
-static int s_load_null_terminated_buffer_from_cursor(
- struct aws_byte_buf *load_into,
+int aws_tls_ctx_options_init_client_mtls_from_path(
+ struct aws_tls_ctx_options *options,
struct aws_allocator *allocator,
- const struct aws_byte_cursor *from) {
- if (from->ptr[from->len - 1] == 0) {
- if (aws_byte_buf_init_copy_from_cursor(load_into, allocator, *from)) {
- return AWS_OP_ERR;
- }
+ const char *cert_path,
+ const char *pkey_path) {
- load_into->len -= 1;
- } else {
- if (aws_byte_buf_init(load_into, allocator, from->len + 1)) {
- return AWS_OP_ERR;
- }
+#if !defined(AWS_OS_IOS)
+ aws_tls_ctx_options_init_default_client(options, allocator);
- memcpy(load_into->buffer, from->ptr, from->len);
- load_into->buffer[from->len] = 0;
- load_into->len = from->len;
+ if (aws_byte_buf_init_from_file(&options->certificate, allocator, cert_path)) {
+ goto error;
+ }
+
+ if (aws_sanitize_pem(&options->certificate, allocator)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid certificate. File must contain PEM encoded data");
+ goto error;
+ }
+
+ if (aws_byte_buf_init_from_file(&options->private_key, allocator, pkey_path)) {
+ goto error;
+ }
+
+ if (aws_sanitize_pem(&options->private_key, allocator)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid private key. File must contain PEM encoded data");
+ goto error;
}
return AWS_OP_SUCCESS;
+error:
+ aws_tls_ctx_options_clean_up(options);
+ return AWS_OP_ERR;
+
+#else
+ (void)allocator;
+ (void)cert_path;
+ (void)pkey_path;
+ AWS_ZERO_STRUCT(*options);
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: This platform does not support PEM certificates");
+ return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED);
+#endif
}
-#if !defined(AWS_OS_IOS)
-
-int aws_tls_ctx_options_init_client_mtls(
+int aws_tls_ctx_options_init_client_mtls_with_pkcs11(
struct aws_tls_ctx_options *options,
struct aws_allocator *allocator,
- const struct aws_byte_cursor *cert,
- const struct aws_byte_cursor *pkey) {
+ const struct aws_tls_ctx_pkcs11_options *pkcs11_options) {
+
+#if defined(_WIN32) || defined(__APPLE__)
+ (void)allocator;
+ (void)pkcs11_options;
AWS_ZERO_STRUCT(*options);
- options->minimum_tls_version = AWS_IO_TLS_VER_SYS_DEFAULTS;
- options->cipher_pref = AWS_IO_TLS_CIPHER_PREF_SYSTEM_DEFAULT;
- options->verify_peer = true;
- options->allocator = allocator;
- options->max_fragment_size = g_aws_channel_max_fragment_size;
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: This platform does not currently support TLS with PKCS#11.");
+ return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED);
+#else
- /* s2n relies on null terminated c_strings, so we need to make sure we're properly
- * terminated, but we don't want length to reflect the terminator because
- * Apple and Windows will fail hard if you use a null terminator. */
- if (s_load_null_terminated_buffer_from_cursor(&options->certificate, allocator, cert)) {
- return AWS_OP_ERR;
+ aws_tls_ctx_options_init_default_client(options, allocator);
+
+ /* pkcs11_lib is required */
+ if (pkcs11_options->pkcs11_lib == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: A PKCS#11 library must be specified.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto error;
}
+ options->pkcs11.lib = aws_pkcs11_lib_acquire(pkcs11_options->pkcs11_lib); /* cannot fail */
- if (s_load_null_terminated_buffer_from_cursor(&options->private_key, allocator, pkey)) {
- aws_byte_buf_clean_up(&options->certificate);
- return AWS_OP_ERR;
+ /* user_pin is optional */
+ if (pkcs11_options->user_pin.ptr != NULL) {
+ options->pkcs11.user_pin = aws_string_new_from_cursor(allocator, &pkcs11_options->user_pin);
+ }
+
+ /* slot_id is optional */
+ if (pkcs11_options->slot_id != NULL) {
+ options->pkcs11.slot_id = *pkcs11_options->slot_id;
+ options->pkcs11.has_slot_id = true;
+ }
+
+ /* token_label is optional */
+ if (pkcs11_options->token_label.ptr != NULL) {
+ options->pkcs11.token_label = aws_string_new_from_cursor(allocator, &pkcs11_options->token_label);
+ }
+
+ /* private_key_object_label is optional */
+ if (pkcs11_options->private_key_object_label.ptr != NULL) {
+ options->pkcs11.private_key_object_label =
+ aws_string_new_from_cursor(allocator, &pkcs11_options->private_key_object_label);
+ }
+
+ /* certificate required, but there are multiple ways to pass it in */
+ if ((pkcs11_options->cert_file_path.ptr != NULL) && (pkcs11_options->cert_file_contents.ptr != NULL)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS, "static: Both certificate filepath and contents are specified. Only one may be set.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto error;
+ } else if (pkcs11_options->cert_file_path.ptr != NULL) {
+ struct aws_string *tmp_string = aws_string_new_from_cursor(allocator, &pkcs11_options->cert_file_path);
+ int op = aws_byte_buf_init_from_file(&options->certificate, allocator, aws_string_c_str(tmp_string));
+ aws_string_destroy(tmp_string);
+ if (op != AWS_OP_SUCCESS) {
+ goto error;
+ }
+ } else if (pkcs11_options->cert_file_contents.ptr != NULL) {
+ if (aws_byte_buf_init_copy_from_cursor(&options->certificate, allocator, pkcs11_options->cert_file_contents)) {
+ goto error;
+ }
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: A certificate must be specified.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto error;
+ }
+
+ if (aws_sanitize_pem(&options->certificate, allocator)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid certificate. File must contain PEM encoded data");
+ goto error;
}
+ /* Success! */
return AWS_OP_SUCCESS;
+
+error:
+ aws_tls_ctx_options_clean_up(options);
+ return AWS_OP_ERR;
+#endif /* PLATFORM-SUPPORTS-PKCS11-TLS */
}
-int aws_tls_ctx_options_init_client_mtls_from_path(
+int aws_tls_ctx_options_set_keychain_path(
struct aws_tls_ctx_options *options,
- struct aws_allocator *allocator,
- const char *cert_path,
- const char *pkey_path) {
- AWS_ZERO_STRUCT(*options);
- options->minimum_tls_version = AWS_IO_TLS_VER_SYS_DEFAULTS;
- options->cipher_pref = AWS_IO_TLS_CIPHER_PREF_SYSTEM_DEFAULT;
- options->verify_peer = true;
- options->allocator = allocator;
- options->max_fragment_size = g_aws_channel_max_fragment_size;
+ struct aws_byte_cursor *keychain_path_cursor) {
- if (aws_byte_buf_init_from_file(&options->certificate, allocator, cert_path)) {
- return AWS_OP_ERR;
- }
+#if defined(__APPLE__) && !defined(AWS_OS_IOS)
+ AWS_LOGF_WARN(AWS_LS_IO_TLS, "static: Keychain path is deprecated.");
- if (aws_byte_buf_init_from_file(&options->private_key, allocator, pkey_path)) {
- aws_byte_buf_clean_up(&options->certificate);
+ options->keychain_path = aws_string_new_from_cursor(options->allocator, keychain_path_cursor);
+ if (!options->keychain_path) {
return AWS_OP_ERR;
}
return AWS_OP_SUCCESS;
+#else
+ (void)options;
+ (void)keychain_path_cursor;
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Keychain path can only be set on MacOS.");
+ return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED);
+#endif
}
-#endif /* AWS_OS_IOS */
-
-#ifdef _WIN32
-void aws_tls_ctx_options_init_client_mtls_from_system_path(
+int aws_tls_ctx_options_init_client_mtls_from_system_path(
struct aws_tls_ctx_options *options,
struct aws_allocator *allocator,
const char *cert_reg_path) {
- AWS_ZERO_STRUCT(*options);
- options->minimum_tls_version = AWS_IO_TLS_VER_SYS_DEFAULTS;
- options->verify_peer = true;
- options->allocator = allocator;
- options->max_fragment_size = g_aws_channel_max_fragment_size;
+
+#ifdef _WIN32
+ aws_tls_ctx_options_init_default_client(options, allocator);
options->system_certificate_path = cert_reg_path;
+ return AWS_OP_SUCCESS;
+#else
+ (void)allocator;
+ (void)cert_reg_path;
+ AWS_ZERO_STRUCT(*options);
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: System certificate path can only be set on Windows.");
+ return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED);
+#endif
}
-void aws_tls_ctx_options_init_default_server_from_system_path(
+int aws_tls_ctx_options_init_default_server_from_system_path(
struct aws_tls_ctx_options *options,
struct aws_allocator *allocator,
const char *cert_reg_path) {
- aws_tls_ctx_options_init_client_mtls_from_system_path(options, allocator, cert_reg_path);
+ if (aws_tls_ctx_options_init_client_mtls_from_system_path(options, allocator, cert_reg_path)) {
+ return AWS_OP_ERR;
+ }
options->verify_peer = false;
+ return AWS_OP_SUCCESS;
}
-#endif /* _WIN32 */
-#ifdef __APPLE__
int aws_tls_ctx_options_init_client_mtls_pkcs12_from_path(
struct aws_tls_ctx_options *options,
struct aws_allocator *allocator,
const char *pkcs12_path,
struct aws_byte_cursor *pkcs_pwd) {
- AWS_ZERO_STRUCT(*options);
- options->minimum_tls_version = AWS_IO_TLS_VER_SYS_DEFAULTS;
- options->verify_peer = true;
- options->allocator = allocator;
- options->max_fragment_size = g_aws_channel_max_fragment_size;
+
+#ifdef __APPLE__
+ aws_tls_ctx_options_init_default_client(options, allocator);
if (aws_byte_buf_init_from_file(&options->pkcs12, allocator, pkcs12_path)) {
return AWS_OP_ERR;
@@ -177,6 +285,14 @@ int aws_tls_ctx_options_init_client_mtls_pkcs12_from_path(
}
return AWS_OP_SUCCESS;
+#else
+ (void)allocator;
+ (void)pkcs12_path;
+ (void)pkcs_pwd;
+ AWS_ZERO_STRUCT(*options);
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: This platform does not support PKCS#12 files.");
+ return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED);
+#endif
}
int aws_tls_ctx_options_init_client_mtls_pkcs12(
@@ -184,22 +300,28 @@ int aws_tls_ctx_options_init_client_mtls_pkcs12(
struct aws_allocator *allocator,
struct aws_byte_cursor *pkcs12,
struct aws_byte_cursor *pkcs_pwd) {
- AWS_ZERO_STRUCT(*options);
- options->minimum_tls_version = AWS_IO_TLS_VER_SYS_DEFAULTS;
- options->verify_peer = true;
- options->allocator = allocator;
- options->max_fragment_size = g_aws_channel_max_fragment_size;
- if (s_load_null_terminated_buffer_from_cursor(&options->pkcs12, allocator, pkcs12)) {
+#ifdef __APPLE__
+ aws_tls_ctx_options_init_default_client(options, allocator);
+
+ if (aws_byte_buf_init_copy_from_cursor(&options->pkcs12, allocator, *pkcs12)) {
return AWS_OP_ERR;
}
- if (s_load_null_terminated_buffer_from_cursor(&options->pkcs12_password, allocator, pkcs_pwd)) {
+ if (aws_byte_buf_init_copy_from_cursor(&options->pkcs12_password, allocator, *pkcs_pwd)) {
aws_byte_buf_clean_up_secure(&options->pkcs12);
return AWS_OP_ERR;
}
return AWS_OP_SUCCESS;
+#else
+ (void)allocator;
+ (void)pkcs12;
+ (void)pkcs_pwd;
+ AWS_ZERO_STRUCT(*options);
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: This platform does not support PKCS#12 files.");
+ return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED);
+#endif
}
int aws_tls_ctx_options_init_server_pkcs12_from_path(
@@ -228,21 +350,27 @@ int aws_tls_ctx_options_init_server_pkcs12(
return AWS_OP_SUCCESS;
}
-#endif /* __APPLE__ */
-
-#if !defined(AWS_OS_IOS)
-
int aws_tls_ctx_options_init_default_server_from_path(
struct aws_tls_ctx_options *options,
struct aws_allocator *allocator,
const char *cert_path,
const char *pkey_path) {
+
+#if !defined(AWS_OS_IOS)
if (aws_tls_ctx_options_init_client_mtls_from_path(options, allocator, cert_path, pkey_path)) {
return AWS_OP_ERR;
}
options->verify_peer = false;
return AWS_OP_SUCCESS;
+#else
+ (void)allocator;
+ (void)cert_path;
+ (void)pkey_path;
+ AWS_ZERO_STRUCT(*options);
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Cannot create a server on this platform.");
+ return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED);
+#endif
}
int aws_tls_ctx_options_init_default_server(
@@ -250,16 +378,24 @@ int aws_tls_ctx_options_init_default_server(
struct aws_allocator *allocator,
struct aws_byte_cursor *cert,
struct aws_byte_cursor *pkey) {
+
+#if !defined(AWS_OS_IOS)
if (aws_tls_ctx_options_init_client_mtls(options, allocator, cert, pkey)) {
return AWS_OP_ERR;
}
options->verify_peer = false;
return AWS_OP_SUCCESS;
+#else
+ (void)allocator;
+ (void)cert;
+ (void)pkey;
+ AWS_ZERO_STRUCT(*options);
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Cannot create a server on this platform.");
+ return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED);
+#endif
}
-#endif /* AWS_OS_IOS */
-
int aws_tls_ctx_options_set_alpn_list(struct aws_tls_ctx_options *options, const char *alpn_list) {
options->alpn_list = aws_string_new_from_c_str(options->allocator, alpn_list);
if (!options->alpn_list) {
@@ -284,34 +420,83 @@ int aws_tls_ctx_options_override_default_trust_store_from_path(
const char *ca_path,
const char *ca_file) {
+ /* Note: on success these are not cleaned up, their data is "moved" into the options struct */
+ struct aws_string *ca_path_tmp = NULL;
+ struct aws_byte_buf ca_file_tmp;
+ AWS_ZERO_STRUCT(ca_file_tmp);
+
if (ca_path) {
- options->ca_path = aws_string_new_from_c_str(options->allocator, ca_path);
- if (!options->ca_path) {
- return AWS_OP_ERR;
+ if (options->ca_path) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: cannot override trust store multiple times");
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto error;
+ }
+
+ ca_path_tmp = aws_string_new_from_c_str(options->allocator, ca_path);
+ if (!ca_path_tmp) {
+ goto error;
}
}
if (ca_file) {
- if (aws_byte_buf_init_from_file(&options->ca_file, options->allocator, ca_file)) {
- return AWS_OP_ERR;
+ if (aws_tls_options_buf_is_set(&options->ca_file)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: cannot override trust store multiple times");
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto error;
+ }
+
+ if (aws_byte_buf_init_from_file(&ca_file_tmp, options->allocator, ca_file)) {
+ goto error;
+ }
+
+ if (aws_sanitize_pem(&ca_file_tmp, options->allocator)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid CA file. File must contain PEM encoded data");
+ goto error;
}
}
+ /* Success, set new values. (no need to clean up old values, we checked earlier that they were unallocated) */
+ if (ca_path) {
+ options->ca_path = ca_path_tmp;
+ }
+ if (ca_file) {
+ options->ca_file = ca_file_tmp;
+ }
return AWS_OP_SUCCESS;
+
+error:
+ aws_string_destroy_secure(ca_path_tmp);
+ aws_byte_buf_clean_up_secure(&ca_file_tmp);
+ return AWS_OP_ERR;
+}
+
+void aws_tls_ctx_options_set_extension_data(struct aws_tls_ctx_options *options, void *extension_data) {
+ options->ctx_options_extension = extension_data;
}
int aws_tls_ctx_options_override_default_trust_store(
struct aws_tls_ctx_options *options,
const struct aws_byte_cursor *ca_file) {
- /* s2n relies on null terminated c_strings, so we need to make sure we're properly
- * terminated, but we don't want length to reflect the terminator because
- * Apple and Windows will fail hard if you use a null terminator. */
- if (s_load_null_terminated_buffer_from_cursor(&options->ca_file, options->allocator, ca_file)) {
- return AWS_OP_ERR;
+ if (aws_tls_options_buf_is_set(&options->ca_file)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: cannot override trust store multiple times");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ if (aws_byte_buf_init_copy_from_cursor(&options->ca_file, options->allocator, *ca_file)) {
+ goto error;
+ }
+
+ if (aws_sanitize_pem(&options->ca_file, options->allocator)) {
+ AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid CA file. File must contain PEM encoded data");
+ goto error;
}
return AWS_OP_SUCCESS;
+
+error:
+ aws_byte_buf_clean_up_secure(&options->ca_file);
+ return AWS_OP_ERR;
}
void aws_tls_connection_options_init_from_ctx(
@@ -328,6 +513,10 @@ void aws_tls_connection_options_init_from_ctx(
int aws_tls_connection_options_copy(
struct aws_tls_connection_options *to,
const struct aws_tls_connection_options *from) {
+
+ /* clean up the options before copy. */
+ aws_tls_connection_options_clean_up(to);
+
/* copy everything copyable over, then override the rest with deep copies. */
*to = *from;
@@ -383,6 +572,12 @@ int aws_tls_connection_options_set_server_name(
struct aws_tls_connection_options *conn_options,
struct aws_allocator *allocator,
struct aws_byte_cursor *server_name) {
+
+ if (conn_options->server_name != NULL) {
+ aws_string_destroy(conn_options->server_name);
+ conn_options->server_name = NULL;
+ }
+
conn_options->server_name = aws_string_new_from_cursor(allocator, server_name);
if (!conn_options->server_name) {
return AWS_OP_ERR;
@@ -396,6 +591,11 @@ int aws_tls_connection_options_set_alpn_list(
struct aws_allocator *allocator,
const char *alpn_list) {
+ if (conn_options->alpn_list != NULL) {
+ aws_string_destroy(conn_options->alpn_list);
+ conn_options->alpn_list = NULL;
+ }
+
conn_options->alpn_list = aws_string_new_from_c_str(allocator, alpn_list);
if (!conn_options->alpn_list) {
return AWS_OP_ERR;
@@ -404,6 +604,84 @@ int aws_tls_connection_options_set_alpn_list(
return AWS_OP_SUCCESS;
}
+#ifdef BYO_CRYPTO
+
+struct aws_tls_ctx *aws_tls_server_ctx_new(struct aws_allocator *alloc, const struct aws_tls_ctx_options *options) {
+ (void)alloc;
+ (void)options;
+ AWS_FATAL_ASSERT(
+ false &&
+ "When using BYO_CRYPTO, user is responsible for creating aws_tls_ctx manually. You cannot call this function.");
+}
+
+struct aws_tls_ctx *aws_tls_client_ctx_new(struct aws_allocator *alloc, const struct aws_tls_ctx_options *options) {
+ (void)alloc;
+ (void)options;
+ AWS_FATAL_ASSERT(
+ false &&
+ "When using BYO_CRYPTO, user is responsible for creating aws_tls_ctx manually. You cannot call this function.");
+}
+
+static aws_tls_handler_new_fn *s_client_handler_new = NULL;
+static aws_tls_client_handler_start_negotiation_fn *s_start_negotiation_fn = NULL;
+static void *s_client_user_data = NULL;
+
+static aws_tls_handler_new_fn *s_server_handler_new = NULL;
+static void *s_server_user_data = NULL;
+
+struct aws_channel_handler *aws_tls_client_handler_new(
+ struct aws_allocator *allocator,
+ struct aws_tls_connection_options *options,
+ struct aws_channel_slot *slot) {
+ AWS_FATAL_ASSERT(
+ s_client_handler_new &&
+ "For BYO_CRYPTO, you must call aws_tls_client_handler_new_set_callback() with a non-null value.");
+ return s_client_handler_new(allocator, options, slot, s_client_user_data);
+}
+
+struct aws_channel_handler *aws_tls_server_handler_new(
+ struct aws_allocator *allocator,
+ struct aws_tls_connection_options *options,
+ struct aws_channel_slot *slot) {
+ AWS_FATAL_ASSERT(
+ s_server_handler_new &&
+ "For BYO_CRYPTO, you must call aws_tls_byo_crypto_set_server_setup_options() with a non-null new_handler_fn.");
+ return s_server_handler_new(allocator, options, slot, s_server_user_data);
+}
+
+void aws_tls_byo_crypto_set_client_setup_options(const struct aws_tls_byo_crypto_setup_options *options) {
+ AWS_FATAL_ASSERT(options);
+ AWS_FATAL_ASSERT(options->new_handler_fn);
+ AWS_FATAL_ASSERT(options->start_negotiation_fn);
+
+ s_client_handler_new = options->new_handler_fn;
+ s_start_negotiation_fn = options->start_negotiation_fn;
+ s_client_user_data = options->user_data;
+}
+
+void aws_tls_byo_crypto_set_server_setup_options(const struct aws_tls_byo_crypto_setup_options *options) {
+ AWS_FATAL_ASSERT(options);
+ AWS_FATAL_ASSERT(options->new_handler_fn);
+
+ s_server_handler_new = options->new_handler_fn;
+ s_server_user_data = options->user_data;
+}
+
+int aws_tls_client_handler_start_negotiation(struct aws_channel_handler *handler) {
+ AWS_FATAL_ASSERT(
+ s_start_negotiation_fn &&
+ "For BYO_CRYPTO, you must call aws_tls_client_handler_set_start_negotiation_callback() with a non-null value.")
+ return s_start_negotiation_fn(handler, s_client_user_data);
+}
+
+void aws_tls_init_static_state(struct aws_allocator *alloc) {
+ (void)alloc;
+}
+
+void aws_tls_clean_up_static_state(void) {}
+
+#endif /* BYO_CRYPTO */
+
int aws_channel_setup_client_tls(
struct aws_channel_slot *right_of_slot,
struct aws_tls_connection_options *tls_options) {
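Besides folding the platform #ifdefs into per-function guards — every unsupported path now fails with AWS_ERROR_PLATFORM_NOT_SUPPORTED instead of vanishing from the ABI — the headline addition here is PKCS#11-backed mTLS. A minimal sketch of the new options path; the PIN, labels, and path are illustrative values, and pkcs11_lib is assumed to be already loaded via aws_pkcs11_lib_new:

#include <aws/common/byte_buf.h>
#include <aws/io/tls_channel_handler.h>

/* Sketch: all labels/paths here are illustrative, not defaults. */
static int s_init_pkcs11_mtls_options(
    struct aws_tls_ctx_options *options,
    struct aws_allocator *allocator,
    struct aws_pkcs11_lib *pkcs11_lib) {
    struct aws_tls_ctx_pkcs11_options pkcs11_options = {
        .pkcs11_lib = pkcs11_lib, /* the only required field */
        .user_pin = aws_byte_cursor_from_c_str("0000"),
        .token_label = aws_byte_cursor_from_c_str("my-token"),
        .private_key_object_label = aws_byte_cursor_from_c_str("my-key"),
        .cert_file_path = aws_byte_cursor_from_c_str("/path/to/cert.pem"),
    };
    /* returns AWS_ERROR_PLATFORM_NOT_SUPPORTED on _WIN32 and __APPLE__ */
    return aws_tls_ctx_options_init_client_mtls_with_pkcs11(options, allocator, &pkcs11_options);
}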
diff --git a/contrib/restricted/aws/aws-c-io/source/tls_channel_handler_shared.c b/contrib/restricted/aws/aws-c-io/source/tls_channel_handler_shared.c
index 0a35e78b67..884b09f6f1 100644
--- a/contrib/restricted/aws/aws-c-io/source/tls_channel_handler_shared.c
+++ b/contrib/restricted/aws/aws-c-io/source/tls_channel_handler_shared.c
@@ -63,3 +63,7 @@ void aws_on_tls_negotiation_completed(struct aws_tls_channel_handler_shared *tls
aws_channel_current_clock_time(
tls_handler_shared->handler->slot->channel, &tls_handler_shared->stats.handshake_end_ns);
}
+
+bool aws_tls_options_buf_is_set(const struct aws_byte_buf *buf) {
+ return buf->allocator != NULL;
+}
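The new private helper leans on an aws_byte_buf invariant: a zeroed buffer has a NULL allocator, while the init paths used for these option buffers (aws_byte_buf_init_from_file, aws_byte_buf_init_copy_from_cursor) store the allocator, so the field doubles as an "is set" flag with no extra state. Illustrative snippet:

/* Sketch: demonstrates the allocator-as-flag idiom behind the helper above. */
static bool s_buf_is_set_demo(struct aws_allocator *allocator, struct aws_byte_cursor pem) {
    struct aws_byte_buf buf;
    AWS_ZERO_STRUCT(buf); /* buf.allocator == NULL -> "not set" */
    bool was_set_before = aws_tls_options_buf_is_set(&buf);

    aws_byte_buf_init_copy_from_cursor(&buf, allocator, pem);
    bool set_after = aws_tls_options_buf_is_set(&buf); /* true: allocator recorded */

    aws_byte_buf_clean_up(&buf);
    return !was_set_before && set_after;
}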
diff --git a/contrib/restricted/aws/aws-c-io/source/uri.c b/contrib/restricted/aws/aws-c-io/source/uri.c
index bb0cf01ae4..a03a998c5f 100644
--- a/contrib/restricted/aws/aws-c-io/source/uri.c
+++ b/contrib/restricted/aws/aws-c-io/source/uri.c
@@ -327,18 +327,41 @@ static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor
struct aws_byte_cursor authority_parse_csr = parser->uri->authority;
if (authority_parse_csr.len) {
+ /* RFC-3986 section 3.2: authority = [ userinfo "@" ] host [ ":" port ] */
+ uint8_t *userinfo_delim = memchr(authority_parse_csr.ptr, '@', authority_parse_csr.len);
+ if (userinfo_delim) {
+
+ parser->uri->userinfo =
+ aws_byte_cursor_advance(&authority_parse_csr, userinfo_delim - authority_parse_csr.ptr);
+ /* For the "@" mark */
+ aws_byte_cursor_advance(&authority_parse_csr, 1);
+ struct aws_byte_cursor userinfo_parse_csr = parser->uri->userinfo;
+ uint8_t *info_delim = memchr(userinfo_parse_csr.ptr, ':', userinfo_parse_csr.len);
+ /* RFC-3986 section 3.2.1: use of the format "user:password" in the userinfo field is deprecated, but we
+ * still split on the first ':' (RFC-1738 section 3.1: <user>:<password>). If the userinfo does not
+ * follow that pattern, the whole userinfo becomes the user field. */
+ if (info_delim) {
+ parser->uri->user.ptr = userinfo_parse_csr.ptr;
+ parser->uri->user.len = info_delim - userinfo_parse_csr.ptr;
+ parser->uri->password.ptr = info_delim + 1;
+ parser->uri->password.len = parser->uri->userinfo.len - parser->uri->user.len - 1;
+ } else {
+ parser->uri->user = userinfo_parse_csr;
+ }
+ }
uint8_t *port_delim = memchr(authority_parse_csr.ptr, ':', authority_parse_csr.len);
if (!port_delim) {
parser->uri->port = 0;
- parser->uri->host_name = parser->uri->authority;
+ parser->uri->host_name = authority_parse_csr;
return;
}
parser->uri->host_name.ptr = authority_parse_csr.ptr;
parser->uri->host_name.len = port_delim - authority_parse_csr.ptr;
- size_t port_len = parser->uri->authority.len - parser->uri->host_name.len - 1;
+ size_t port_len = authority_parse_csr.len - parser->uri->host_name.len - 1;
port_delim += 1;
for (size_t i = 0; i < port_len; ++i) {
if (!aws_isdigit(port_delim[i])) {
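With the userinfo split in place, a parsed URI now exposes user and password cursors alongside the host. A small sketch, assuming the matching aws_uri struct fields added elsewhere in this commit:

#include <aws/io/uri.h>

/* Sketch: "alice"/"secret" land in uri.user/uri.password per the split above. */
static void s_parse_userinfo_demo(struct aws_allocator *allocator) {
    struct aws_byte_cursor uri_csr =
        aws_byte_cursor_from_c_str("https://alice:secret@example.com:8443/index.html");
    struct aws_uri uri;
    if (aws_uri_init_parse(&uri, allocator, &uri_csr) == AWS_OP_SUCCESS) {
        /* uri.user == "alice", uri.password == "secret", uri.port == 8443 */
        aws_uri_clean_up(&uri);
    }
}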
@@ -437,23 +460,21 @@ static void s_unchecked_append_canonicalized_path_character(struct aws_byte_buf
}
switch (value) {
+ /* non-alpha-numeric unreserved, don't % encode them */
case '-':
case '_':
case '.':
case '~':
- case '$':
- case '&':
- case ',':
+
+ /* reserved characters that we should not % encode in the path component */
case '/':
- case ':':
- case ';':
- case '=':
- case '@': {
++buffer->len;
*dest_ptr = value;
return;
- }
+ /*
+ * everything else we should % encode, including from the reserved list
+ */
default:
buffer->len += 3;
*dest_ptr++ = '%';