summary | refs | log | tree | commit | diff | stats
path: root/contrib/restricted/aws/aws-c-http
diff options
context:
space:
mode:
author: dakovalkov <[email protected]> 2023-12-03 13:33:55 +0300
committer: dakovalkov <[email protected]> 2023-12-03 14:04:39 +0300
commit2a718325637e5302334b6d0a6430f63168f8dbb3 (patch)
tree64be81080b7df9ec1d86d053a0c394ae53fcf1fe /contrib/restricted/aws/aws-c-http
parente0d94a470142d95c3007e9c5d80380994940664a (diff)
Update contrib/libs/aws-sdk-cpp to 1.11.37
Diffstat (limited to 'contrib/restricted/aws/aws-c-http')
-rw-r--r--contrib/restricted/aws/aws-c-http/CMakeLists.darwin-arm64.txt70
-rw-r--r--contrib/restricted/aws/aws-c-http/CMakeLists.darwin-x86_64.txt70
-rw-r--r--contrib/restricted/aws/aws-c-http/CMakeLists.linux-aarch64.txt71
-rw-r--r--contrib/restricted/aws/aws-c-http/CMakeLists.linux-x86_64.txt71
-rw-r--r--contrib/restricted/aws/aws-c-http/CMakeLists.txt19
-rw-r--r--contrib/restricted/aws/aws-c-http/CMakeLists.windows-x86_64.txt70
-rw-r--r--contrib/restricted/aws/aws-c-http/CODE_OF_CONDUCT.md4
-rw-r--r--contrib/restricted/aws/aws-c-http/CONTRIBUTING.md61
-rw-r--r--contrib/restricted/aws/aws-c-http/LICENSE202
-rw-r--r--contrib/restricted/aws/aws-c-http/NOTICE3
-rw-r--r--contrib/restricted/aws/aws-c-http/README.md61
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/connection.h679
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/connection_manager.h194
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/exports.h29
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/http.h158
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/http2_stream_manager.h215
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_impl.h210
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_manager_system_vtable.h50
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_monitor.h46
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_connection.h201
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_decoder.h90
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_encoder.h140
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_stream.h123
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_connection.h289
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_decoder.h121
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_frames.h299
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_stream.h190
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack.h297
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack_header_static_table.def74
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/http2_stream_manager_impl.h199
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/http_impl.h100
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/proxy_impl.h236
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/random_access_set.h86
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/request_response_impl.h69
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/strutil.h84
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_decoder.h79
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_encoder.h57
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_impl.h115
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/proxy.h570
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/request_response.h1072
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/server.h198
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/statistics.h75
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/status_code.h82
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/websocket.h483
-rw-r--r--contrib/restricted/aws/aws-c-http/source/connection.c1200
-rw-r--r--contrib/restricted/aws/aws-c-http/source/connection_manager.c1560
-rw-r--r--contrib/restricted/aws/aws-c-http/source/connection_monitor.c235
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h1_connection.c2064
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h1_decoder.c761
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h1_encoder.c915
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h1_stream.c535
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h2_connection.c2850
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h2_decoder.c1592
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h2_frames.c1233
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h2_stream.c1321
-rw-r--r--contrib/restricted/aws/aws-c-http/source/hpack.c525
-rw-r--r--contrib/restricted/aws/aws-c-http/source/hpack_decoder.c446
-rw-r--r--contrib/restricted/aws/aws-c-http/source/hpack_encoder.c418
-rw-r--r--contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c2337
-rw-r--r--contrib/restricted/aws/aws-c-http/source/http.c565
-rw-r--r--contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c1238
-rw-r--r--contrib/restricted/aws/aws-c-http/source/proxy_connection.c1658
-rw-r--r--contrib/restricted/aws/aws-c-http/source/proxy_strategy.c1703
-rw-r--r--contrib/restricted/aws/aws-c-http/source/random_access_set.c187
-rw-r--r--contrib/restricted/aws/aws-c-http/source/request_response.c1228
-rw-r--r--contrib/restricted/aws/aws-c-http/source/statistics.c35
-rw-r--r--contrib/restricted/aws/aws-c-http/source/strutil.c232
-rw-r--r--contrib/restricted/aws/aws-c-http/source/websocket.c1790
-rw-r--r--contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c866
-rw-r--r--contrib/restricted/aws/aws-c-http/source/websocket_decoder.c387
-rw-r--r--contrib/restricted/aws/aws-c-http/source/websocket_encoder.c375
-rw-r--r--contrib/restricted/aws/aws-c-http/ya.make80
72 files changed, 35948 insertions, 0 deletions
diff --git a/contrib/restricted/aws/aws-c-http/CMakeLists.darwin-arm64.txt b/contrib/restricted/aws/aws-c-http/CMakeLists.darwin-arm64.txt
new file mode 100644
index 00000000000..2df1842086b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,70 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-http)
+target_compile_options(restricted-aws-aws-c-http PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-http PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/include
+)
+target_link_libraries(restricted-aws-aws-c-http PUBLIC
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-compression
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-http PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_frames.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/random_access_set.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/request_response.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/statistics.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/strutil.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
+)
diff --git a/contrib/restricted/aws/aws-c-http/CMakeLists.darwin-x86_64.txt b/contrib/restricted/aws/aws-c-http/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 00000000000..2df1842086b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,70 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-http)
+target_compile_options(restricted-aws-aws-c-http PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-http PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/include
+)
+target_link_libraries(restricted-aws-aws-c-http PUBLIC
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-compression
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-http PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_frames.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/random_access_set.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/request_response.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/statistics.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/strutil.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
+)
diff --git a/contrib/restricted/aws/aws-c-http/CMakeLists.linux-aarch64.txt b/contrib/restricted/aws/aws-c-http/CMakeLists.linux-aarch64.txt
new file mode 100644
index 00000000000..4cef4bd81eb
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,71 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-http)
+target_compile_options(restricted-aws-aws-c-http PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-http PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/include
+)
+target_link_libraries(restricted-aws-aws-c-http PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-compression
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-http PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_frames.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/random_access_set.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/request_response.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/statistics.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/strutil.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
+)
diff --git a/contrib/restricted/aws/aws-c-http/CMakeLists.linux-x86_64.txt b/contrib/restricted/aws/aws-c-http/CMakeLists.linux-x86_64.txt
new file mode 100644
index 00000000000..4cef4bd81eb
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,71 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-http)
+target_compile_options(restricted-aws-aws-c-http PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-http PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/include
+)
+target_link_libraries(restricted-aws-aws-c-http PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-compression
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-http PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_frames.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/random_access_set.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/request_response.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/statistics.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/strutil.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
+)
diff --git a/contrib/restricted/aws/aws-c-http/CMakeLists.txt b/contrib/restricted/aws/aws-c-http/CMakeLists.txt
new file mode 100644
index 00000000000..2dce3a77fe3
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CMakeLists.txt
@@ -0,0 +1,19 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-aarch64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+ include(CMakeLists.darwin-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+ include(CMakeLists.darwin-arm64.txt)
+elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
+ include(CMakeLists.windows-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-x86_64.txt)
+endif()
diff --git a/contrib/restricted/aws/aws-c-http/CMakeLists.windows-x86_64.txt b/contrib/restricted/aws/aws-c-http/CMakeLists.windows-x86_64.txt
new file mode 100644
index 00000000000..2df1842086b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,70 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-http)
+target_compile_options(restricted-aws-aws-c-http PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-http PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/include
+)
+target_link_libraries(restricted-aws-aws-c-http PUBLIC
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-compression
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-http PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_frames.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/random_access_set.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/request_response.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/statistics.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/strutil.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
+)
diff --git a/contrib/restricted/aws/aws-c-http/CODE_OF_CONDUCT.md b/contrib/restricted/aws/aws-c-http/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000000..3b64466870c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
[email protected] with any additional questions or comments.
diff --git a/contrib/restricted/aws/aws-c-http/CONTRIBUTING.md b/contrib/restricted/aws/aws-c-http/CONTRIBUTING.md
new file mode 100644
index 00000000000..d79975fab05
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CONTRIBUTING.md
@@ -0,0 +1,61 @@
+# Contributing Guidelines
+
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+documentation, we greatly value feedback and contributions from our community.
+
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+information to effectively respond to your bug report or contribution.
+
+
+## Reporting Bugs/Feature Requests
+
+We welcome you to use the GitHub issue tracker to report bugs or suggest features.
+
+When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-http/issues), or [recently closed](https://github.com/awslabs/aws-c-http/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already
+reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+* A reproducible test case or series of steps
+* The version of our code being used
+* Any modifications you've made relevant to the bug
+* Anything unusual about your environment or deployment
+
+
+## Contributing via Pull Requests
+Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
+
+1. You are working against the latest source on the *main* branch.
+2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
+3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
+
+To send us a pull request, please:
+
+1. Fork the repository.
+2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
+3. Ensure local tests pass.
+4. Commit to your fork using clear commit messages.
+5. Send us a pull request, answering any default questions in the pull request interface.
+6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
+
+GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+
+## Finding contributions to work on
+Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-http/labels/help%20wanted) issues is a great place to start.
+
+
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
[email protected] with any additional questions or comments.
+
+
+## Security issue notifications
+If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
+
+
+## Licensing
+
+See the [LICENSE](https://github.com/awslabs/aws-c-http/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
+
+We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
diff --git a/contrib/restricted/aws/aws-c-http/LICENSE b/contrib/restricted/aws/aws-c-http/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/contrib/restricted/aws/aws-c-http/NOTICE b/contrib/restricted/aws/aws-c-http/NOTICE
new file mode 100644
index 00000000000..6ac9e1e1186
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/NOTICE
@@ -0,0 +1,3 @@
+AWS C Http
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+SPDX-License-Identifier: Apache-2.0.
diff --git a/contrib/restricted/aws/aws-c-http/README.md b/contrib/restricted/aws/aws-c-http/README.md
new file mode 100644
index 00000000000..e92425af2bc
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/README.md
@@ -0,0 +1,61 @@
+## AWS C Http
+
+C99 implementation of the HTTP/1.1 and HTTP/2 specifications
+
+## License
+
+This library is licensed under the Apache 2.0 License.
+
+## Usage
+
+### Building
+
+CMake 3.1+ is required to build.
+
+`<install-path>` must be an absolute path in the following instructions.
+
+#### Linux-Only Dependencies
+
+If you are building on Linux, you will need to build aws-lc and s2n-tls first.
+
+```
+git clone [email protected]:awslabs/aws-lc.git
+cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-lc/build --target install
+
+git clone [email protected]:aws/s2n-tls.git
+cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build s2n-tls/build --target install
+```
+
+#### Building aws-c-http and Remaining Dependencies
+
+```
+git clone [email protected]:awslabs/aws-c-common.git
+cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-c-common/build --target install
+
+git clone [email protected]:awslabs/aws-c-cal.git
+cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-cal/build --target install
+
+git clone [email protected]:awslabs/aws-c-io.git
+cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-io/build --target install
+
+git clone [email protected]:awslabs/aws-c-compression.git
+cmake -S aws-c-compression -B aws-c-compression/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-compression/build --target install
+
+git clone [email protected]:awslabs/aws-c-http.git
+cmake -S aws-c-http -B aws-c-http/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-http/build --target install
+```
+
+#### Run Integration Tests with localhost
+
+To run some of the integration tests (start with localhost_integ_*), you need to set up a localhost server that echoes the request headers from `/echo` back first.
+
+To do that, check [localhost](./tests/py_localhost/) script we have.
+
+After that, configure and build your cmake project with `-DENABLE_LOCALHOST_INTEGRATION_TESTS=true` to build the tests with localhost and run them from `ctest --output-on-failure -R localhost_integ_*`.
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/connection.h b/contrib/restricted/aws/aws-c-http/include/aws/http/connection.h
new file mode 100644
index 00000000000..e6362c1439e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/connection.h
@@ -0,0 +1,679 @@
+#ifndef AWS_HTTP_CONNECTION_H
+#define AWS_HTTP_CONNECTION_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+struct aws_client_bootstrap;
+struct aws_socket_options;
+struct aws_tls_connection_options;
+struct aws_http2_setting;
+struct proxy_env_var_settings;
+
+/**
+ * An HTTP connection.
+ * This type is used by both server-side and client-side connections.
+ * This type is also used by all supported versions of HTTP.
+ */
+struct aws_http_connection;
+
+/**
+ * Invoked when connect completes.
+ *
+ * If unsuccessful, error_code will be set, connection will be NULL,
+ * and the on_shutdown callback will never be invoked.
+ *
+ * If successful, error_code will be 0 and connection will be valid.
+ * The user is now responsible for the connection and must
+ * call aws_http_connection_release() when they are done with it.
+ *
+ * The connection uses one event-loop thread to do all its work.
+ * The thread invoking this callback will be the same thread that invokes all
+ * future callbacks for this connection and its streams.
+ */
+typedef void(
+ aws_http_on_client_connection_setup_fn)(struct aws_http_connection *connection, int error_code, void *user_data);
+
+/**
+ * Invoked when the connection has finished shutting down.
+ * Never invoked if on_setup failed.
+ * This is always invoked on connection's event-loop thread.
+ * Note that the connection is not completely done until on_shutdown has been invoked
+ * AND aws_http_connection_release() has been called.
+ */
+typedef void(
+ aws_http_on_client_connection_shutdown_fn)(struct aws_http_connection *connection, int error_code, void *user_data);
+
+/**
+ * Invoked when the HTTP/2 settings change is complete.
+ * If the connection is set up successfully, this will always be invoked, whether the settings change succeeds or fails.
+ * If error_code is AWS_ERROR_SUCCESS (0), then the peer has acknowledged the settings and the change has been applied.
+ * If error_code is non-zero, then a connection error occurred before the settings could be fully acknowledged and
+ * applied. This is always invoked on the connection's event-loop thread.
+ */
+typedef void(aws_http2_on_change_settings_complete_fn)(
+ struct aws_http_connection *http2_connection,
+ int error_code,
+ void *user_data);
+
+/**
+ * Invoked when the HTTP/2 PING completes, whether peer has acknowledged it or not.
+ * If error_code is AWS_ERROR_SUCCESS (0), then the peer has acknowledged the PING and round_trip_time_ns will be the
+ * round trip time in nano seconds for the connection.
+ * If error_code is non-zero, then a connection error occurred before the PING was acknowledged, and round_trip_time_ns
+ * will be useless in this case.
+ */
+typedef void(aws_http2_on_ping_complete_fn)(
+ struct aws_http_connection *http2_connection,
+ uint64_t round_trip_time_ns,
+ int error_code,
+ void *user_data);
+
+/**
+ * Invoked when an HTTP/2 GOAWAY frame is received from peer.
+ * Implies that the peer has initiated shutdown, or encountered a serious error.
+ * Once a GOAWAY is received, no further streams may be created on this connection.
+ *
+ * @param http2_connection This HTTP/2 connection.
+ * @param last_stream_id ID of the last locally-initiated stream that peer will
+ * process. Any locally-initiated streams with a higher ID are ignored by
+ * peer, and are safe to retry on another connection.
+ * @param http2_error_code The HTTP/2 error code (RFC-7540 section 7) sent by peer.
+ * `enum aws_http2_error_code` lists official codes.
+ * @param debug_data The debug data sent by peer. It can be empty. (NOTE: this data is only valid for the lifetime of
+ * the callback. Make a deep copy if you wish to keep it longer.)
+ * @param user_data User-data passed to the callback.
+ */
+typedef void(aws_http2_on_goaway_received_fn)(
+ struct aws_http_connection *http2_connection,
+ uint32_t last_stream_id,
+ uint32_t http2_error_code,
+ struct aws_byte_cursor debug_data,
+ void *user_data);
+
+/**
+ * Invoked when new HTTP/2 settings from peer have been applied.
+ * Settings_array is the array of aws_http2_settings that contains all the settings we just changed in the order we
+ * applied (the order settings arrived). Num_settings is the number of elements in that array.
+ */
+typedef void(aws_http2_on_remote_settings_change_fn)(
+ struct aws_http_connection *http2_connection,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ void *user_data);
+
+/**
+ * Callback invoked on each statistics sample.
+ *
+ * connection_nonce is unique to each connection for disambiguation of each callback per connection.
+ */
+typedef void(
+ aws_http_statistics_observer_fn)(size_t connection_nonce, const struct aws_array_list *stats_list, void *user_data);
+
+/**
+ * Configuration options for connection monitoring
+ */
+struct aws_http_connection_monitoring_options {
+
+ /**
+ * minimum required throughput of the connection. Throughput is only measured against the interval of time where
+ * there is actual io to perform. Read and write throughput are measured and checked independently of one another.
+ */
+ uint64_t minimum_throughput_bytes_per_second;
+
+ /*
+ * amount of time, in seconds, throughput is allowed to drop below the minimum before the connection is shut down
+ * as unhealthy.
+ */
+ uint32_t allowable_throughput_failure_interval_seconds;
+
+ /**
+ * invoked on each statistics publish by the underlying IO channel. Install this callback to receive the statistics
+ * for observation. This field is optional.
+ */
+ aws_http_statistics_observer_fn *statistics_observer_fn;
+
+ /**
+ * user_data to be passed to statistics_observer_fn.
+ */
+ void *statistics_observer_user_data;
+};
+
+/**
+ * Options specific to HTTP/1.x connections.
+ */
+struct aws_http1_connection_options {
+ /**
+ * Optional
+ * Capacity in bytes of the HTTP/1 connection's read buffer.
+ * The buffer grows if the flow-control window of the incoming HTTP-stream
+ * reaches zero. If the buffer reaches capacity, no further socket data is
+ * read until the HTTP-stream's window opens again, allowing data to resume flowing.
+ *
+ * Ignored if `manual_window_management` is false.
+ * If zero is specified (the default) then a default capacity is chosen.
+ * A capacity that is too small may hinder throughput.
+ * A capacity that is too big may waste memory without helping throughput.
+ */
+ size_t read_buffer_capacity;
+};
+
+/**
+ * Options specific to HTTP/2 connections.
+ */
+struct aws_http2_connection_options {
+ /**
+ * Optional
+ * The data of settings to change for initial settings.
+ * Note: each setting has its boundary. If settings_array is not set, num_settings has to be 0 to send an empty
+ * SETTINGS frame.
+ */
+ struct aws_http2_setting *initial_settings_array;
+
+ /**
+ * Required
+ * The num of settings to change (Length of the initial_settings_array).
+ */
+ size_t num_initial_settings;
+
+ /**
+ * Optional.
+ * Invoked when the HTTP/2 initial settings change is complete.
+ * If failed to setup the connection, this will not be invoked.
+ * Otherwise, this will be invoked, whether settings change successfully or unsuccessfully.
+ * See `aws_http2_on_change_settings_complete_fn`.
+ */
+ aws_http2_on_change_settings_complete_fn *on_initial_settings_completed;
+
+ /**
+ * Optional
+ * The max number of recently-closed streams to remember.
+ * Set it to zero to use the default setting, AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS
+ *
+ * If the connection receives a frame for a closed stream,
+ * the frame will be ignored or cause a connection error,
+ * depending on the frame type and how the stream was closed.
+ * Remembering more streams reduces the chances that a late frame causes
+ * a connection error, but costs some memory.
+ */
+ size_t max_closed_streams;
+
+ /**
+ * Optional.
+ * Invoked when a valid GOAWAY frame received.
+ * See `aws_http2_on_goaway_received_fn`.
+ */
+ aws_http2_on_goaway_received_fn *on_goaway_received;
+
+ /**
+ * Optional.
+ * Invoked when new settings from peer have been applied.
+ * See `aws_http2_on_remote_settings_change_fn`.
+ */
+ aws_http2_on_remote_settings_change_fn *on_remote_settings_change;
+
+ /**
+ * Optional.
+ * Set to true to manually manage the flow-control window of whole HTTP/2 connection.
+ *
+ * If false, the connection will maintain its flow-control windows such that
+ * no back-pressure is applied and data arrives as fast as possible.
+ *
+ * If true, the flow-control window of the whole connection will shrink as body data
+     * is received (headers, padding, and other metadata do not affect the window) for every stream
+ * created on this connection.
+ * The initial connection flow-control window is 65,535.
+     * Once the connection's flow-control window reaches 0, all the streams on the connection stop receiving any
+ * further data.
+ * The user must call aws_http2_connection_update_window() to increment the connection's
+ * window and keep data flowing.
+ * Note: the padding of data frame counts to the flow-control window.
+ * But, the client will always automatically update the window for padding even for manual window update.
+ */
+ bool conn_manual_window_management;
+};
+
+/**
+ * Options for creating an HTTP client connection.
+ * Initialize with AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT to set default values.
+ */
+struct aws_http_client_connection_options {
+ /**
+ * The sizeof() this struct, used for versioning.
+ * Set by AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT.
+ */
+ size_t self_size;
+
+ /**
+ * Required.
+ * Must outlive the connection.
+ */
+ struct aws_allocator *allocator;
+
+ /**
+ * Required.
+ * The connection keeps the bootstrap alive via ref-counting.
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /**
+ * Required.
+ * aws_http_client_connect() makes a copy.
+ */
+ struct aws_byte_cursor host_name;
+
+ /**
+ * Required.
+ */
+ uint16_t port;
+
+ /**
+ * Required.
+ * aws_http_client_connect() makes a copy.
+ */
+ const struct aws_socket_options *socket_options;
+
+ /**
+ * Optional.
+ * aws_http_client_connect() deep-copies all contents,
+ * and keeps `aws_tls_ctx` alive via ref-counting.
+ */
+ const struct aws_tls_connection_options *tls_options;
+
+ /**
+ * Optional
+ * Configuration options related to http proxy usage.
+ * Relevant fields are copied internally.
+ */
+ const struct aws_http_proxy_options *proxy_options;
+
+ /*
+ * Optional.
+ * Configuration for using proxy from environment variable.
+ * Only works when proxy_options is not set.
+ */
+ const struct proxy_env_var_settings *proxy_ev_settings;
+
+ /**
+ * Optional
+ * Configuration options related to connection health monitoring
+ */
+ const struct aws_http_connection_monitoring_options *monitoring_options;
+
+ /**
+ * Set to true to manually manage the flow-control window of each stream.
+ *
+ * If false, the connection will maintain its flow-control windows such that
+ * no back-pressure is applied and data arrives as fast as possible.
+ *
+ * If true, the flow-control window of each stream will shrink as body data
+ * is received (headers, padding, and other metadata do not affect the window).
+ * `initial_window_size` determines the starting size of each stream's window for HTTP/1 stream, while HTTP/2 stream
+ * will use the settings AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE to inform the other side about read back pressure
+ *
+ * If a stream's flow-control window reaches 0, no further data will be received. The user must call
+ * aws_http_stream_update_window() to increment the stream's window and keep data flowing.
+ *
+ * If a HTTP/2 connection created, it will ONLY control the stream window
+ * management. Connection window management is controlled by
+ * conn_manual_window_management. Note: the padding of data frame counts to the flow-control window.
+ * But, the client will always automatically update the window for padding even for manual window update.
+ */
+ bool manual_window_management;
+
+ /**
+ * The starting size of each HTTP stream's flow-control window for HTTP/1 connection.
+ * Required if `manual_window_management` is true,
+ * ignored if `manual_window_management` is false.
+ *
+ * Always ignored when HTTP/2 connection created. The initial window size is controlled by the settings,
+ * `AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE`
+ */
+ size_t initial_window_size;
+
+ /**
+ * User data for callbacks
+ * Optional.
+ */
+ void *user_data;
+
+ /**
+ * Invoked when connect completes.
+ * Required.
+ * See `aws_http_on_client_connection_setup_fn`.
+ */
+ aws_http_on_client_connection_setup_fn *on_setup;
+
+ /**
+ * Invoked when the connection has finished shutting down.
+ * Never invoked if setup failed.
+ * Optional.
+ * See `aws_http_on_client_connection_shutdown_fn`.
+ */
+ aws_http_on_client_connection_shutdown_fn *on_shutdown;
+
+ /**
+ * Optional.
+ * When true, use prior knowledge to set up an HTTP/2 connection on a cleartext
+ * connection.
+ * When TLS is set and this is true, the connection will fail to be established,
+ * as prior knowledge only works for cleartext (non-TLS) connections.
+ * Refer to RFC7540 3.4
+ */
+ bool prior_knowledge_http2;
+
+ /**
+ * Optional.
+ * Pointer to the hash map containing the ALPN string to protocol to use.
+ * Hash from `struct aws_string *` to `enum aws_http_version`.
+ * If not set, only the predefined strings `h2` and `http/1.1` will be recognized. Other negotiated ALPN strings will
+ * result in an HTTP/1.1 connection
+ * Note: Connection will keep a deep copy of the table and the strings.
+ */
+ struct aws_hash_table *alpn_string_map;
+
+ /**
+ * Options specific to HTTP/1.x connections.
+ * Optional.
+ * Ignored if connection is not HTTP/1.x.
+ * If connection is HTTP/1.x and options were not specified, default values are used.
+ */
+ const struct aws_http1_connection_options *http1_options;
+
+ /**
+ * Options specific to HTTP/2 connections.
+ * Optional.
+ * Ignored if connection is not HTTP/2.
+ * If connection is HTTP/2 and options were not specified, default values are used.
+ */
+ const struct aws_http2_connection_options *http2_options;
+
+ /**
+ * Optional.
+ * Requests the channel/connection be bound to a specific event loop rather than chosen sequentially from the
+ * event loop group associated with the client bootstrap.
+ */
+ struct aws_event_loop *requested_event_loop;
+};
+
+/* Predefined settings identifiers (RFC-7540 6.5.2) */
+enum aws_http2_settings_id {
+ AWS_HTTP2_SETTINGS_BEGIN_RANGE = 0x1, /* Beginning of known values */
+ AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE = 0x1,
+ AWS_HTTP2_SETTINGS_ENABLE_PUSH = 0x2,
+ AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS = 0x3,
+ AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE = 0x4,
+ AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE = 0x5,
+ AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE = 0x6,
+ AWS_HTTP2_SETTINGS_END_RANGE, /* End of known values */
+};
+
+/* A HTTP/2 setting and its value, used in SETTINGS frame */
+struct aws_http2_setting {
+ enum aws_http2_settings_id id;
+ uint32_t value;
+};
+
+/**
+ * HTTP/2: Default value for max closed streams we will keep in memory.
+ */
+#define AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS (32)
+
+/**
+ * HTTP/2: The size of payload for HTTP/2 PING frame.
+ */
+#define AWS_HTTP2_PING_DATA_SIZE (8)
+
+/**
+ * HTTP/2: The number of known settings.
+ */
+#define AWS_HTTP2_SETTINGS_COUNT (6)
+
+/**
+ * Initializes aws_http_client_connection_options with default values.
+ */
+#define AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT \
+ { .self_size = sizeof(struct aws_http_client_connection_options), .initial_window_size = SIZE_MAX, }
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Asynchronously establish a client connection.
+ * The on_setup callback is invoked when the operation has created a connection or failed.
+ */
+AWS_HTTP_API
+int aws_http_client_connect(const struct aws_http_client_connection_options *options);
+
+/**
+ * Users must release the connection when they are done with it.
+ * The connection's memory cannot be reclaimed until this is done.
+ * If the connection was not already shutting down, it will be shut down.
+ *
+ * Users should always wait for the on_shutdown() callback to be called before releasing any data passed to the
+ * http_connection (Eg aws_tls_connection_options, aws_socket_options) otherwise there will be race conditions between
+ * http_connection shutdown tasks and memory release tasks, causing Segfaults.
+ */
+AWS_HTTP_API
+void aws_http_connection_release(struct aws_http_connection *connection);
+
+/**
+ * Begin shutdown sequence of the connection if it hasn't already started. This will schedule shutdown tasks on the
+ * EventLoop that may send HTTP/TLS/TCP shutdown messages to peers if necessary, and will eventually cause internal
+ * connection memory to stop being accessed and on_shutdown() callback to be called.
+ *
+ * It's safe to call this function regardless of the connection state as long as you hold a reference to the connection.
+ */
+AWS_HTTP_API
+void aws_http_connection_close(struct aws_http_connection *connection);
+
+/**
+ * Stop accepting new requests for the connection. It will NOT start the shutdown process for the connection. The
+ * requests that are already open can still wait to be completed, but new requests will fail to be created,
+ */
+AWS_HTTP_API
+void aws_http_connection_stop_new_requests(struct aws_http_connection *connection);
+
+/**
+ * Returns true unless the connection is closed or closing.
+ */
+AWS_HTTP_API
+bool aws_http_connection_is_open(const struct aws_http_connection *connection);
+
+/**
+ * Return whether the connection can make a new request.
+ * If false, then a new connection must be established to make further requests.
+ */
+AWS_HTTP_API
+bool aws_http_connection_new_requests_allowed(const struct aws_http_connection *connection);
+
+/**
+ * Returns true if this is a client connection.
+ */
+AWS_HTTP_API
+bool aws_http_connection_is_client(const struct aws_http_connection *connection);
+
+AWS_HTTP_API
+enum aws_http_version aws_http_connection_get_version(const struct aws_http_connection *connection);
+
+/**
+ * Returns the channel hosting the HTTP connection.
+ * Do not expose this function to language bindings.
+ */
+AWS_HTTP_API
+struct aws_channel *aws_http_connection_get_channel(struct aws_http_connection *connection);
+
+/**
+ * Initialize a map copied from the *src map, which maps `struct aws_string *` to `enum aws_http_version`.
+ */
+AWS_HTTP_API
+int aws_http_alpn_map_init_copy(
+ struct aws_allocator *allocator,
+ struct aws_hash_table *dest,
+ struct aws_hash_table *src);
+
+/**
+ * Initialize an empty hash-table that maps `struct aws_string *` to `enum aws_http_version`.
+ * This map can be used in aws_http_client_connections_options.alpn_string_map.
+ */
+AWS_HTTP_API
+int aws_http_alpn_map_init(struct aws_allocator *allocator, struct aws_hash_table *map);
+
+/**
+ * Checks http proxy options for correctness
+ */
+AWS_HTTP_API
+int aws_http_options_validate_proxy_configuration(const struct aws_http_client_connection_options *options);
+
+/**
+ * Send a SETTINGS frame (HTTP/2 only).
+ * SETTINGS will be applied locally when SETTINGS ACK is received from peer.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param settings_array The array of settings to change. Note: each setting has its boundary.
+ * @param num_settings The num of settings to change in settings_array.
+ * @param on_completed Optional callback, see `aws_http2_on_change_settings_complete_fn`.
+ * @param user_data User-data pass to on_completed callback.
+ */
+AWS_HTTP_API
+int aws_http2_connection_change_settings(
+ struct aws_http_connection *http2_connection,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ aws_http2_on_change_settings_complete_fn *on_completed,
+ void *user_data);
+
+/**
+ * Send a PING frame (HTTP/2 only).
+ * Round-trip-time is calculated when PING ACK is received from peer.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param optional_opaque_data Optional payload for PING frame.
+ * Must be NULL, or exactly 8 bytes (AWS_HTTP2_PING_DATA_SIZE).
+ * If NULL, the 8 byte payload will be all zeroes.
+ * @param on_completed Optional callback, invoked when PING ACK is received from peer,
+ * or when a connection error prevents the PING ACK from being received.
+ * Callback always fires on the connection's event-loop thread.
+ * @param user_data User-data pass to on_completed callback.
+ */
+AWS_HTTP_API
+int aws_http2_connection_ping(
+ struct aws_http_connection *http2_connection,
+ const struct aws_byte_cursor *optional_opaque_data,
+ aws_http2_on_ping_complete_fn *on_completed,
+ void *user_data);
+
+/**
+ * Get the local settings we are using to affect the decoding.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param out_settings fixed size array of aws_http2_setting gets set to the local settings
+ */
+AWS_HTTP_API
+void aws_http2_connection_get_local_settings(
+ const struct aws_http_connection *http2_connection,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]);
+
+/**
+ * Get the settings received from the remote peer, which we use to restrict the messages we send.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param out_settings fixed size array of aws_http2_setting gets set to the remote settings
+ */
+AWS_HTTP_API
+void aws_http2_connection_get_remote_settings(
+ const struct aws_http_connection *http2_connection,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]);
+
+/**
+ * Send a custom GOAWAY frame (HTTP/2 only).
+ *
+ * Note that the connection automatically attempts to send a GOAWAY during
+ * shutdown (unless a GOAWAY with a valid Last-Stream-ID has already been sent).
+ *
+ * This call can be used to gracefully warn the peer of an impending shutdown
+ * (http2_error=0, allow_more_streams=true), or to customize the final GOAWAY
+ * frame that is sent by this connection.
+ *
+ * The other end may not receive the goaway, if the connection already closed.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param http2_error The HTTP/2 error code (RFC-7540 section 7) to send.
+ * `enum aws_http2_error_code` lists official codes.
+ * @param allow_more_streams If true, new peer-initiated streams will continue
+ * to be acknowledged and the GOAWAY's Last-Stream-ID will be set to a max value.
+ * If false, new peer-initiated streams will be ignored and the GOAWAY's
+ * Last-Stream-ID will be set to the latest acknowledged stream.
+ * @param optional_debug_data Optional debug data to send. Size must not exceed 16KB.
+ */
+
+AWS_HTTP_API
+void aws_http2_connection_send_goaway(
+ struct aws_http_connection *http2_connection,
+ uint32_t http2_error,
+ bool allow_more_streams,
+ const struct aws_byte_cursor *optional_debug_data);
+
+/**
+ * Get data about the latest GOAWAY frame sent to peer (HTTP/2 only).
+ * If no GOAWAY has been sent, AWS_ERROR_HTTP_DATA_NOT_AVAILABLE will be raised.
+ * Note that GOAWAY frames are typically sent automatically by the connection
+ * during shutdown.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param out_http2_error Gets set to HTTP/2 error code sent in most recent GOAWAY.
+ * @param out_last_stream_id Gets set to Last-Stream-ID sent in most recent GOAWAY.
+ */
+AWS_HTTP_API
+int aws_http2_connection_get_sent_goaway(
+ struct aws_http_connection *http2_connection,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id);
+
+/**
+ * Get data about the latest GOAWAY frame received from peer (HTTP/2 only).
+ * If no GOAWAY has been received, or the GOAWAY payload is still in transmitting,
+ * AWS_ERROR_HTTP_DATA_NOT_AVAILABLE will be raised.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param out_http2_error Gets set to HTTP/2 error code received in most recent GOAWAY.
+ * @param out_last_stream_id Gets set to Last-Stream-ID received in most recent GOAWAY.
+ */
+AWS_HTTP_API
+int aws_http2_connection_get_received_goaway(
+ struct aws_http_connection *http2_connection,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id);
+
+/**
+ * Increment the connection's flow-control window to keep data flowing (HTTP/2 only).
+ *
+ * If the connection was created with `conn_manual_window_management` set true,
+ * the flow-control window of the connection will shrink as body data is received for all the streams created on it.
+ * (headers, padding, and other metadata do not affect the window).
+ * The initial connection flow-control window is 65,535.
+ * Once the connection's flow-control window reaches 0, all the streams on the connection stop receiving any further
+ * data.
+ *
+ * If `conn_manual_window_management` is false, this call will have no effect.
+ * The connection maintains its flow-control windows such that
+ * no back-pressure is applied and data arrives as fast as possible.
+ *
+ * If you are not connected, this call will have no effect.
+ *
+ * Crashes when the connection is not http2 connection.
+ * The maximum window size is 2**31 - 1. If the increment size causes the connection flow-control window to exceed
+ * the maximum size, this call will result in the connection being lost.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param increment_size The size to increment for the connection's flow control window
+ */
+AWS_HTTP_API
+void aws_http2_connection_update_window(struct aws_http_connection *http2_connection, uint32_t increment_size);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_CONNECTION_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/connection_manager.h b/contrib/restricted/aws/aws-c-http/include/aws/http/connection_manager.h
new file mode 100644
index 00000000000..4c02df9382a
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/connection_manager.h
@@ -0,0 +1,194 @@
+#ifndef AWS_HTTP_CONNECTION_MANAGER_H
+#define AWS_HTTP_CONNECTION_MANAGER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+#include <aws/common/byte_buf.h>
+
+struct aws_client_bootstrap;
+struct aws_http_connection;
+struct aws_http_connection_manager;
+struct aws_socket_options;
+struct aws_tls_connection_options;
+struct proxy_env_var_settings;
+struct aws_http2_setting;
+
+typedef void(aws_http_connection_manager_on_connection_setup_fn)(
+ struct aws_http_connection *connection,
+ int error_code,
+ void *user_data);
+
+typedef void(aws_http_connection_manager_shutdown_complete_fn)(void *user_data);
+
+/**
+ * Metrics for logging and debugging purpose.
+ */
+struct aws_http_manager_metrics {
+ /**
+ * The number of additional concurrent requests that can be supported by the HTTP manager without needing to
+ * establish additional connections to the target server.
+ *
+ * For connection manager, it equals to connections that's idle.
+ * For stream manager, it equals to the number of streams that are possible to be made without creating new
+ * connection, although the implementation can create new connection without fully filling it.
+ */
+ size_t available_concurrency;
+ /* The number of requests that are awaiting concurrency to be made available from the HTTP manager. */
+ size_t pending_concurrency_acquires;
+ /* The number of connections (http/1.1) or streams (for h2 via stream manager) currently vended to user. */
+ size_t leased_concurrency;
+};
+
+/*
+ * Connection manager configuration struct.
+ *
+ * Contains all of the configuration needed to create an http connection as well as
+ * the maximum number of connections to ever have in existence.
+ */
+struct aws_http_connection_manager_options {
+ /*
+ * http connection configuration, check `struct aws_http_client_connection_options` for details of each config
+ */
+ struct aws_client_bootstrap *bootstrap;
+ size_t initial_window_size;
+ const struct aws_socket_options *socket_options;
+
+ /**
+ * Options to create secure (HTTPS) connections.
+ * For secure connections, set "h2" in the ALPN string for HTTP/2, otherwise HTTP/1.1 is used.
+ *
+ * Leave NULL to create cleartext (HTTP) connections.
+ * For cleartext connections, use `http2_prior_knowledge` (RFC-7540 3.4)
+ * to control whether they are treated as HTTP/1.1 or HTTP/2.
+ */
+ const struct aws_tls_connection_options *tls_connection_options;
+
+ /**
+ * Specify whether you have prior knowledge that cleartext (HTTP) connections are HTTP/2 (RFC-7540 3.4).
+ * If false, then cleartext connections are treated as HTTP/1.1.
+ * It is illegal to set this true when secure connections are being used.
+ * Note that upgrading from HTTP/1.1 to HTTP/2 is not supported (RFC-7540 3.2).
+ */
+ bool http2_prior_knowledge;
+
+ const struct aws_http_connection_monitoring_options *monitoring_options;
+ struct aws_byte_cursor host;
+ uint16_t port;
+
+ /**
+ * Optional.
+ * HTTP/2 specific configuration. Check `struct aws_http2_connection_options` for details of each config
+ */
+ const struct aws_http2_setting *initial_settings_array;
+ size_t num_initial_settings;
+ size_t max_closed_streams;
+ bool http2_conn_manual_window_management;
+
+ /* Proxy configuration for http connection */
+ const struct aws_http_proxy_options *proxy_options;
+
+ /*
+ * Optional.
+ * Configuration for using proxy from environment variable.
+ * Only works when proxy_options is not set.
+ */
+ const struct proxy_env_var_settings *proxy_ev_settings;
+
+ /*
+ * Maximum number of connections this manager is allowed to contain
+ */
+ size_t max_connections;
+
+ /*
+ * Callback and associated user data to invoke when the connection manager has
+ * completely shutdown and has finished deleting itself.
+ * Technically optional, but correctness may be impossible without it.
+ */
+ void *shutdown_complete_user_data;
+ aws_http_connection_manager_shutdown_complete_fn *shutdown_complete_callback;
+
+ /**
+ * If set to true, the read back pressure mechanism will be enabled.
+ */
+ bool enable_read_back_pressure;
+
+ /**
+ * If set to a non-zero value, then connections that stay in the pool longer than the specified
+ * timeout will be closed automatically.
+ */
+ uint64_t max_connection_idle_in_milliseconds;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/*
+ * Connection managers are ref counted. Adds one external ref to the manager.
+ */
+AWS_HTTP_API
+void aws_http_connection_manager_acquire(struct aws_http_connection_manager *manager);
+
+/*
+ * Connection managers are ref counted. Removes one external ref from the manager.
+ *
+ * When the ref count goes to zero, the connection manager begins its shut down
+ * process. All pending connection acquisitions are failed (with callbacks
+ * invoked) and any (erroneous) subsequent attempts to acquire a connection
+ * fail immediately. The connection manager destroys itself once all pending
+ * asynchronous activities have resolved.
+ */
+AWS_HTTP_API
+void aws_http_connection_manager_release(struct aws_http_connection_manager *manager);
+
+/*
+ * Creates a new connection manager with the supplied configuration options.
+ *
+ * The returned connection manager begins with a ref count of 1.
+ */
+AWS_HTTP_API
+struct aws_http_connection_manager *aws_http_connection_manager_new(
+ struct aws_allocator *allocator,
+ const struct aws_http_connection_manager_options *options);
+
+/*
+ * Requests a connection from the manager. The requester is notified of
+ * an acquired connection (or failure to acquire) via the supplied callback.
+ *
+ * For HTTP/2 connections, the callback will not fire until the server's settings have been received.
+ *
+ * Once a connection has been successfully acquired from the manager it
+ * must be released back (via aws_http_connection_manager_release_connection)
+ * at some point. Failure to do so will cause a resource leak.
+ */
+AWS_HTTP_API
+void aws_http_connection_manager_acquire_connection(
+ struct aws_http_connection_manager *manager,
+ aws_http_connection_manager_on_connection_setup_fn *callback,
+ void *user_data);
+
+/*
+ * Returns a connection back to the manager. All acquired connections must
+ * eventually be released back to the manager in order to avoid a resource leak.
+ *
+ * Note: it can lead to another acquired callback to be invoked within the thread.
+ */
+AWS_HTTP_API
+int aws_http_connection_manager_release_connection(
+ struct aws_http_connection_manager *manager,
+ struct aws_http_connection *connection);
+
+/**
+ * Fetch the current manager metrics from connection manager.
+ */
+AWS_HTTP_API
+void aws_http_connection_manager_fetch_metrics(
+ const struct aws_http_connection_manager *manager,
+ struct aws_http_manager_metrics *out_metrics);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_CONNECTION_MANAGER_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/exports.h b/contrib/restricted/aws/aws-c-http/include/aws/http/exports.h
new file mode 100644
index 00000000000..8b728c7d4ba
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/exports.h
@@ -0,0 +1,29 @@
+#ifndef AWS_HTTP_EXPORTS_H
+#define AWS_HTTP_EXPORTS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32)
+# ifdef AWS_HTTP_USE_IMPORT_EXPORT
+# ifdef AWS_HTTP_EXPORTS
+# define AWS_HTTP_API __declspec(dllexport)
+# else
+# define AWS_HTTP_API __declspec(dllimport)
+# endif /* AWS_HTTP_EXPORTS */
+# else
+# define AWS_HTTP_API
+# endif /* USE_IMPORT_EXPORT */
+
+#else
+# if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_HTTP_USE_IMPORT_EXPORT) && defined(AWS_HTTP_EXPORTS)
+# define AWS_HTTP_API __attribute__((visibility("default")))
+# else
+# define AWS_HTTP_API
+# endif /* __GNUC__ >= 4 || defined(__clang__) */
+
+#endif /* defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) */
+
+#endif /* AWS_HTTP_EXPORTS_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/http.h b/contrib/restricted/aws/aws-c-http/include/aws/http/http.h
new file mode 100644
index 00000000000..f02f09dc3e6
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/http.h
@@ -0,0 +1,158 @@
+#ifndef AWS_HTTP_H
+#define AWS_HTTP_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/logging.h>
+#include <aws/http/exports.h>
+#include <aws/io/io.h>
+
+#define AWS_C_HTTP_PACKAGE_ID 2
+
+enum aws_http_errors {
+ AWS_ERROR_HTTP_UNKNOWN = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_HTTP_PACKAGE_ID),
+ AWS_ERROR_HTTP_HEADER_NOT_FOUND,
+ AWS_ERROR_HTTP_INVALID_HEADER_FIELD,
+ AWS_ERROR_HTTP_INVALID_HEADER_NAME,
+ AWS_ERROR_HTTP_INVALID_HEADER_VALUE,
+ AWS_ERROR_HTTP_INVALID_METHOD,
+ AWS_ERROR_HTTP_INVALID_PATH,
+ AWS_ERROR_HTTP_INVALID_STATUS_CODE,
+ AWS_ERROR_HTTP_MISSING_BODY_STREAM,
+ AWS_ERROR_HTTP_INVALID_BODY_STREAM,
+ AWS_ERROR_HTTP_CONNECTION_CLOSED,
+ AWS_ERROR_HTTP_SWITCHED_PROTOCOLS,
+ AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL,
+ AWS_ERROR_HTTP_REACTION_REQUIRED,
+ AWS_ERROR_HTTP_DATA_NOT_AVAILABLE,
+ AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT,
+ AWS_ERROR_HTTP_CALLBACK_FAILURE,
+ AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE,
+ AWS_ERROR_HTTP_WEBSOCKET_CLOSE_FRAME_SENT,
+ AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER,
+ AWS_ERROR_HTTP_CONNECTION_MANAGER_INVALID_STATE_FOR_ACQUIRE,
+ AWS_ERROR_HTTP_CONNECTION_MANAGER_VENDED_CONNECTION_UNDERFLOW,
+ AWS_ERROR_HTTP_SERVER_CLOSED,
+ AWS_ERROR_HTTP_PROXY_CONNECT_FAILED,
+ AWS_ERROR_HTTP_CONNECTION_MANAGER_SHUTTING_DOWN,
+ AWS_ERROR_HTTP_CHANNEL_THROUGHPUT_FAILURE,
+ AWS_ERROR_HTTP_PROTOCOL_ERROR,
+ AWS_ERROR_HTTP_STREAM_IDS_EXHAUSTED,
+ AWS_ERROR_HTTP_GOAWAY_RECEIVED,
+ AWS_ERROR_HTTP_RST_STREAM_RECEIVED,
+ AWS_ERROR_HTTP_RST_STREAM_SENT,
+ AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED,
+ AWS_ERROR_HTTP_STREAM_HAS_COMPLETED,
+ AWS_ERROR_HTTP_PROXY_STRATEGY_NTLM_CHALLENGE_TOKEN_MISSING,
+ AWS_ERROR_HTTP_PROXY_STRATEGY_TOKEN_RETRIEVAL_FAILURE,
+ AWS_ERROR_HTTP_PROXY_CONNECT_FAILED_RETRYABLE,
+ AWS_ERROR_HTTP_PROTOCOL_SWITCH_FAILURE,
+ AWS_ERROR_HTTP_MAX_CONCURRENT_STREAMS_EXCEEDED,
+ AWS_ERROR_HTTP_STREAM_MANAGER_SHUTTING_DOWN,
+ AWS_ERROR_HTTP_STREAM_MANAGER_CONNECTION_ACQUIRE_FAILURE,
+ AWS_ERROR_HTTP_STREAM_MANAGER_UNEXPECTED_HTTP_VERSION,
+ AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR,
+ AWS_ERROR_HTTP_MANUAL_WRITE_NOT_ENABLED,
+ AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED,
+
+ AWS_ERROR_HTTP_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_HTTP_PACKAGE_ID)
+};
+
+/* Error codes that may be present in HTTP/2 RST_STREAM and GOAWAY frames (RFC-7540 7). */
+enum aws_http2_error_code {
+ AWS_HTTP2_ERR_NO_ERROR = 0x00,
+ AWS_HTTP2_ERR_PROTOCOL_ERROR = 0x01,
+ AWS_HTTP2_ERR_INTERNAL_ERROR = 0x02,
+ AWS_HTTP2_ERR_FLOW_CONTROL_ERROR = 0x03,
+ AWS_HTTP2_ERR_SETTINGS_TIMEOUT = 0x04,
+ AWS_HTTP2_ERR_STREAM_CLOSED = 0x05,
+ AWS_HTTP2_ERR_FRAME_SIZE_ERROR = 0x06,
+ AWS_HTTP2_ERR_REFUSED_STREAM = 0x07,
+ AWS_HTTP2_ERR_CANCEL = 0x08,
+ AWS_HTTP2_ERR_COMPRESSION_ERROR = 0x09,
+ AWS_HTTP2_ERR_CONNECT_ERROR = 0x0A,
+ AWS_HTTP2_ERR_ENHANCE_YOUR_CALM = 0x0B,
+ AWS_HTTP2_ERR_INADEQUATE_SECURITY = 0x0C,
+ AWS_HTTP2_ERR_HTTP_1_1_REQUIRED = 0x0D,
+ AWS_HTTP2_ERR_COUNT,
+};
+
+enum aws_http_log_subject {
+ AWS_LS_HTTP_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_HTTP_PACKAGE_ID),
+ AWS_LS_HTTP_CONNECTION,
+ AWS_LS_HTTP_ENCODER,
+ AWS_LS_HTTP_DECODER,
+ AWS_LS_HTTP_SERVER,
+ AWS_LS_HTTP_STREAM,
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ AWS_LS_HTTP_STREAM_MANAGER,
+ AWS_LS_HTTP_WEBSOCKET,
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ AWS_LS_HTTP_PROXY_NEGOTIATION,
+};
+
+enum aws_http_version {
+ AWS_HTTP_VERSION_UNKNOWN, /* Invalid version. */
+ AWS_HTTP_VERSION_1_0,
+ AWS_HTTP_VERSION_1_1,
+ AWS_HTTP_VERSION_2,
+ AWS_HTTP_VERSION_COUNT,
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initializes internal datastructures used by aws-c-http.
+ * Must be called before using any functionality in aws-c-http.
+ */
+AWS_HTTP_API
+void aws_http_library_init(struct aws_allocator *alloc);
+
+/**
+ * Clean up internal datastructures used by aws-c-http.
+ * Must not be called until application is done using functionality in aws-c-http.
+ */
+AWS_HTTP_API
+void aws_http_library_clean_up(void);
+
+/**
+ * Returns the description of common status codes.
+ * Ex: 404 -> "Not Found"
+ * An empty string is returned if the status code is not recognized.
+ */
+AWS_HTTP_API
+const char *aws_http_status_text(int status_code);
+
+/**
+ * Shortcuts for common HTTP request methods
+ */
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_get;
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_head;
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_post;
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_put;
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_delete;
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_connect;
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_options;
+
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_method;
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_scheme;
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_authority;
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_path;
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_status;
+
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_scheme_http;
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_scheme_https;
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/http2_stream_manager.h b/contrib/restricted/aws/aws-c-http/include/aws/http/http2_stream_manager.h
new file mode 100644
index 00000000000..c37da489aa6
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/http2_stream_manager.h
@@ -0,0 +1,215 @@
+#ifndef AWS_HTTP2_STREAM_MANAGER_H
+#define AWS_HTTP2_STREAM_MANAGER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+struct aws_http2_stream_manager;
+struct aws_client_bootstrap;
+struct aws_http_connection;
+struct aws_http_connection_manager;
+struct aws_socket_options;
+struct aws_tls_connection_options;
+struct proxy_env_var_settings;
+struct aws_http2_setting;
+struct aws_http_make_request_options;
+struct aws_http_stream;
+struct aws_http_manager_metrics;
+
+/**
+ * Always invoked asynchronously when the stream was created, successfully or not.
+ * When stream is NULL, error code will be set to indicate what happened.
+ * If there is a stream returned, you own the stream completely.
+ * Invoked on the same thread as other callback of the stream, which will be the thread of the connection, ideally.
+ * If there is no connection made, the callback will be invoked from a separate thread.
+ */
+typedef void(
+ aws_http2_stream_manager_on_stream_acquired_fn)(struct aws_http_stream *stream, int error_code, void *user_data);
+
+/**
+ * Invoked asynchronously when the stream manager has been shutdown completely.
+ * Never invoked when `aws_http2_stream_manager_new` failed.
+ */
+typedef void(aws_http2_stream_manager_shutdown_complete_fn)(void *user_data);
+
+/**
+ * HTTP/2 stream manager configuration struct.
+ *
+ * Contains all of the configuration needed to create an http2 connection as well as
+ * connection manager under the hood.
+ */
+struct aws_http2_stream_manager_options {
+ /**
+ * basic http connection configuration
+ */
+ struct aws_client_bootstrap *bootstrap;
+ const struct aws_socket_options *socket_options;
+
+ /**
+ * Options to create secure (HTTPS) connections.
+ * For secure connections, the ALPN string must be "h2".
+ *
+ * To create cleartext (HTTP) connections, leave this NULL
+ * and set `http2_prior_knowledge` (RFC-7540 3.4).
+ */
+ const struct aws_tls_connection_options *tls_connection_options;
+
+ /**
+ * Specify whether you have prior knowledge that cleartext (HTTP) connections are HTTP/2 (RFC-7540 3.4).
+ * It is illegal to set this true when secure connections are being used.
+ * Note that upgrading from HTTP/1.1 to HTTP/2 is not supported (RFC-7540 3.2).
+ */
+ bool http2_prior_knowledge;
+
+ struct aws_byte_cursor host;
+ uint16_t port;
+
+ /**
+ * Optional.
+ * HTTP/2 connection configuration. Check `struct aws_http2_connection_options` for details of each config.
+ * Notes for window control:
+ * - By default, the client will maintain its flow-control windows such that no back-pressure is applied and data
+ * arrives as fast as possible.
+ * - For connection level window control, `conn_manual_window_management` will enable manual control. The
+ * initial window size is not controllable.
+ * - For stream level window control, `enable_read_back_pressure` will enable manual control. The initial window
+ * size needs to be set through `initial_settings_array`.
+ */
+ const struct aws_http2_setting *initial_settings_array;
+ size_t num_initial_settings;
+ size_t max_closed_streams;
+ bool conn_manual_window_management;
+
+ /**
+ * HTTP/2 Stream window control.
+ * If set to true, the read back pressure mechanism will be enabled for streams created.
+ * The initial window size can be set by `AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE` via `initial_settings_array`
+ */
+ bool enable_read_back_pressure;
+
+ /* Connection monitor for the underlying connections made */
+ const struct aws_http_connection_monitoring_options *monitoring_options;
+
+ /* Optional. Proxy configuration for underlying http connection */
+ const struct aws_http_proxy_options *proxy_options;
+ const struct proxy_env_var_settings *proxy_ev_settings;
+
+ /**
+ * Required.
+ * When the stream manager finishes deleting all the resources, the callback will be invoked.
+ */
+ void *shutdown_complete_user_data;
+ aws_http2_stream_manager_shutdown_complete_fn *shutdown_complete_callback;
+
+ /**
+ * Optional.
+ * When set, connection will be closed if 5xx response received from server.
+ */
+ bool close_connection_on_server_error;
+ /**
+ * Optional.
+ * The period for all the connections held by stream manager to send a PING in milliseconds.
+ * If you specify 0, manager will NOT send any PING.
+ * Note: if set, it must be larger than the ping timeout setting.
+ */
+ size_t connection_ping_period_ms;
+ /**
+ * Optional.
+ * Network connection will be closed if a ping response is not received
+ * within this amount of time (milliseconds).
+ * If you specify 0, a default value will be used.
+ */
+ size_t connection_ping_timeout_ms;
+
+ /* TODO: More flexible policy about the connections, but will always has these three values below. */
+ /**
+ * Optional.
+ * 0 will be considered as using a default value.
+ * The ideal number of concurrent streams for a connection. Stream manager will try to create a new connection if
+ * one connection reaches this number. But, if the max number of connections is reached, the manager will reuse
+ * connections to create the acquired streams as much as possible. */
+ size_t ideal_concurrent_streams_per_connection;
+ /**
+ * Optional.
+ * Default is no limit, which will use the limit from the server. 0 will be considered as using the default value.
+ * The real number of concurrent streams per connection will be controlled by the minimal value of the setting from
+ * other end and the value here.
+ */
+ size_t max_concurrent_streams_per_connection;
+ /**
+ * Required.
+ * The max number of connections that will be open at the same time. If all the connections are full, the manager
+ * will wait until one is available to vend more streams */
+ size_t max_connections;
+};
+
+struct aws_http2_stream_manager_acquire_stream_options {
+ /**
+ * Required.
+ * Invoked when the stream finishes acquiring by stream manager.
+ */
+ aws_http2_stream_manager_on_stream_acquired_fn *callback;
+ /**
+ * Optional.
+ * User data for the callback.
+ */
+ void *user_data;
+ /* Required. see `aws_http_make_request_options` */
+ const struct aws_http_make_request_options *options;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Acquire a refcount from the stream manager, stream manager will start to destroy after the refcount drops to zero.
+ * NULL is acceptable. Initial refcount after new is 1.
+ *
+ * @param manager
+ * @return The same pointer that was passed in.
+ */
+AWS_HTTP_API
+struct aws_http2_stream_manager *aws_http2_stream_manager_acquire(struct aws_http2_stream_manager *manager);
+
+/**
+ * Release a refcount from the stream manager, stream manager will start to destroy after the refcount drops to zero.
+ * NULL is acceptable. Initial refcount after new is 1.
+ *
+ * @param manager
+ * @return NULL
+ */
+AWS_HTTP_API
+struct aws_http2_stream_manager *aws_http2_stream_manager_release(struct aws_http2_stream_manager *manager);
+
+AWS_HTTP_API
+struct aws_http2_stream_manager *aws_http2_stream_manager_new(
+ struct aws_allocator *allocator,
+ const struct aws_http2_stream_manager_options *options);
+
+/**
+ * Acquire a stream from stream manager asynchronously.
+ *
+ * @param http2_stream_manager
+ * @param acquire_stream_option see `aws_http2_stream_manager_acquire_stream_options`
+ */
+AWS_HTTP_API
+void aws_http2_stream_manager_acquire_stream(
+ struct aws_http2_stream_manager *http2_stream_manager,
+ const struct aws_http2_stream_manager_acquire_stream_options *acquire_stream_option);
+
+/**
+ * Fetch the current metrics from stream manager.
+ *
+ * @param http2_stream_manager
+ * @param out_metrics The metrics to be fetched
+ */
+AWS_HTTP_API
+void aws_http2_stream_manager_fetch_metrics(
+ const struct aws_http2_stream_manager *http2_stream_manager,
+ struct aws_http_manager_metrics *out_metrics);
+
+AWS_EXTERN_C_END
+#endif /* AWS_HTTP2_STREAM_MANAGER_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_impl.h
new file mode 100644
index 00000000000..a97ab0daba9
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_impl.h
@@ -0,0 +1,210 @@
+#ifndef AWS_HTTP_CONNECTION_IMPL_H
+#define AWS_HTTP_CONNECTION_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/connection.h>
+
+#include <aws/http/private/http_impl.h>
+#include <aws/http/server.h>
+
+#include <aws/common/atomics.h>
+#include <aws/io/channel.h>
+#include <aws/io/channel_bootstrap.h>
+
+struct aws_http_message;
+struct aws_http_make_request_options;
+struct aws_http_request_handler_options;
+struct aws_http_stream;
+
+typedef int aws_client_bootstrap_new_socket_channel_fn(struct aws_socket_channel_bootstrap_options *options);
+
+struct aws_http_connection_system_vtable {
+ aws_client_bootstrap_new_socket_channel_fn *new_socket_channel;
+};
+
+struct aws_http_connection_vtable {
+ struct aws_channel_handler_vtable channel_handler_vtable;
+
+ /* This is a callback I wish was in aws_channel_handler_vtable. */
+ void (*on_channel_handler_installed)(struct aws_channel_handler *handler, struct aws_channel_slot *slot);
+
+ struct aws_http_stream *(*make_request)(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+
+ struct aws_http_stream *(*new_server_request_handler_stream)(
+ const struct aws_http_request_handler_options *options);
+ int (*stream_send_response)(struct aws_http_stream *stream, struct aws_http_message *response);
+ void (*close)(struct aws_http_connection *connection);
+ void (*stop_new_requests)(struct aws_http_connection *connection);
+ bool (*is_open)(const struct aws_http_connection *connection);
+ bool (*new_requests_allowed)(const struct aws_http_connection *connection);
+
+ /* HTTP/2 specific functions */
+ void (*update_window)(struct aws_http_connection *connection, uint32_t increment_size);
+ int (*change_settings)(
+ struct aws_http_connection *http2_connection,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ aws_http2_on_change_settings_complete_fn *on_completed,
+ void *user_data);
+ int (*send_ping)(
+ struct aws_http_connection *http2_connection,
+ const struct aws_byte_cursor *optional_opaque_data,
+ aws_http2_on_ping_complete_fn *on_completed,
+ void *user_data);
+ void (*send_goaway)(
+ struct aws_http_connection *http2_connection,
+ uint32_t http2_error,
+ bool allow_more_streams,
+ const struct aws_byte_cursor *optional_debug_data);
+ int (*get_sent_goaway)(
+ struct aws_http_connection *http2_connection,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id);
+ int (*get_received_goaway)(
+ struct aws_http_connection *http2_connection,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id);
+ void (*get_local_settings)(
+ const struct aws_http_connection *http2_connection,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]);
+ void (*get_remote_settings)(
+ const struct aws_http_connection *http2_connection,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]);
+};
+
+typedef int(aws_http_proxy_request_transform_fn)(struct aws_http_message *request, void *user_data);
+
+/**
+ * Base class for connections.
+ * There are specific implementations for each HTTP version.
+ */
+struct aws_http_connection {
+ const struct aws_http_connection_vtable *vtable;
+ struct aws_channel_handler channel_handler;
+ struct aws_channel_slot *channel_slot;
+ struct aws_allocator *alloc;
+ enum aws_http_version http_version;
+
+ aws_http_proxy_request_transform_fn *proxy_request_transform;
+ void *user_data;
+
+ /* Connection starts with 1 hold for the user.
+ * aws_http_streams will also acquire holds on their connection for the duration of their lifetime */
+ struct aws_atomic_var refcount;
+
+ /* Starts at either 1 or 2, increments by two with each new stream */
+ uint32_t next_stream_id;
+
+ union {
+ struct aws_http_connection_client_data {
+ uint8_t delete_me; /* exists to prevent "empty struct" errors */
+ } client;
+
+ struct aws_http_connection_server_data {
+ aws_http_on_incoming_request_fn *on_incoming_request;
+ aws_http_on_server_connection_shutdown_fn *on_shutdown;
+ } server;
+ } client_or_server_data;
+
+ /* On client connections, `client_data` points to client_or_server_data.client and `server_data` is null.
+ * Opposite is true on server connections */
+ struct aws_http_connection_client_data *client_data;
+ struct aws_http_connection_server_data *server_data;
+
+ bool stream_manual_window_management;
+};
+
+/* Gets a client connection up and running.
+ * Responsible for firing on_setup and on_shutdown callbacks. */
+struct aws_http_client_bootstrap {
+ struct aws_allocator *alloc;
+ bool is_using_tls;
+ bool stream_manual_window_management;
+ bool prior_knowledge_http2;
+ size_t initial_window_size;
+ struct aws_http_connection_monitoring_options monitoring_options;
+ void *user_data;
+ aws_http_on_client_connection_setup_fn *on_setup;
+ aws_http_on_client_connection_shutdown_fn *on_shutdown;
+ aws_http_proxy_request_transform_fn *proxy_request_transform;
+
+ struct aws_http1_connection_options http1_options;
+ struct aws_http2_connection_options http2_options; /* allocated with bootstrap */
+ struct aws_hash_table *alpn_string_map; /* allocated with bootstrap */
+ struct aws_http_connection *connection;
+};
+
+AWS_EXTERN_C_BEGIN
+AWS_HTTP_API
+void aws_http_client_bootstrap_destroy(struct aws_http_client_bootstrap *bootstrap);
+
+AWS_HTTP_API
+void aws_http_connection_set_system_vtable(const struct aws_http_connection_system_vtable *system_vtable);
+
+AWS_HTTP_API
+int aws_http_client_connect_internal(
+ const struct aws_http_client_connection_options *options,
+ aws_http_proxy_request_transform_fn *proxy_request_transform);
+
+/**
+ * Internal API for adding a reference to a connection
+ */
+AWS_HTTP_API
+void aws_http_connection_acquire(struct aws_http_connection *connection);
+
+/**
+ * Allow tests to fake stats data
+ */
+AWS_HTTP_API
+struct aws_crt_statistics_http1_channel *aws_h1_connection_get_statistics(struct aws_http_connection *connection);
+
+/**
+ * Gets the next available stream id within the connection. Valid for creating both h1 and h2 streams.
+ *
+ * This function is not thread-safe.
+ *
+ * Returns 0 if there was an error.
+ */
+AWS_HTTP_API
+uint32_t aws_http_connection_get_next_stream_id(struct aws_http_connection *connection);
+
+/**
+ * Layers an http channel handler/connection onto a channel. Moved from internal to private so that the proxy
+ * logic could apply a new http connection/handler after tunneling proxy negotiation (into http) is finished.
+ * This is a synchronous operation.
+ *
+ * @param alloc memory allocator to use
+ * @param channel channel to apply the http handler/connection to
+ * @param is_server should the handler behave like an http server
+ * @param is_using_tls whether tls is being used (do an alpn check of the to-the-left channel handler)
+ * @param manual_window_management is manual window management enabled
+ * @param prior_knowledge_http2 prior knowledge about http2 connection to be used
+ * @param initial_window_size what should the initial window size be
+ * @param alpn_string_map the customized ALPN string map from `struct aws_string *` to `enum aws_http_version`.
+ * @param http1_options http1 options
+ * @param http2_options http2 options
+ * @return a new http connection or NULL on failure
+ */
+AWS_HTTP_API
+struct aws_http_connection *aws_http_connection_new_channel_handler(
+ struct aws_allocator *alloc,
+ struct aws_channel *channel,
+ bool is_server,
+ bool is_using_tls,
+ bool manual_window_management,
+ bool prior_knowledge_http2,
+ size_t initial_window_size,
+ const struct aws_hash_table *alpn_string_map,
+ const struct aws_http1_connection_options *http1_options,
+ const struct aws_http2_connection_options *http2_options,
+ void *connection_user_data);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_CONNECTION_IMPL_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_manager_system_vtable.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_manager_system_vtable.h
new file mode 100644
index 00000000000..115ba661364
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_manager_system_vtable.h
@@ -0,0 +1,50 @@
+#ifndef AWS_HTTP_CONNECTION_MANAGER_SYSTEM_VTABLE_H
+#define AWS_HTTP_CONNECTION_MANAGER_SYSTEM_VTABLE_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+#include <aws/http/connection.h>
+
+struct aws_http_connection_manager;
+
+typedef int(aws_http_connection_manager_create_connection_fn)(const struct aws_http_client_connection_options *options);
+typedef void(aws_http_connection_manager_close_connection_fn)(struct aws_http_connection *connection);
+typedef void(aws_http_connection_release_connection_fn)(struct aws_http_connection *connection);
+typedef bool(aws_http_connection_is_connection_available_fn)(const struct aws_http_connection *connection);
+typedef bool(aws_http_connection_manager_is_callers_thread_fn)(struct aws_channel *channel);
+typedef struct aws_channel *(aws_http_connection_manager_connection_get_channel_fn)(
+ struct aws_http_connection *connection);
+typedef enum aws_http_version(aws_http_connection_manager_connection_get_version_fn)(
+ const struct aws_http_connection *connection);
+
+struct aws_http_connection_manager_system_vtable {
+ /*
+ * Downstream http functions
+ */
+ aws_http_connection_manager_create_connection_fn *create_connection;
+ aws_http_connection_manager_close_connection_fn *close_connection;
+ aws_http_connection_release_connection_fn *release_connection;
+ aws_http_connection_is_connection_available_fn *is_connection_available;
+ aws_io_clock_fn *get_monotonic_time;
+ aws_http_connection_manager_is_callers_thread_fn *is_callers_thread;
+ aws_http_connection_manager_connection_get_channel_fn *connection_get_channel;
+ aws_http_connection_manager_connection_get_version_fn *connection_get_version;
+};
+
+AWS_HTTP_API
+bool aws_http_connection_manager_system_vtable_is_valid(const struct aws_http_connection_manager_system_vtable *table);
+
+AWS_HTTP_API
+void aws_http_connection_manager_set_system_vtable(
+ struct aws_http_connection_manager *manager,
+ const struct aws_http_connection_manager_system_vtable *system_vtable);
+
+AWS_HTTP_API
+extern const struct aws_http_connection_manager_system_vtable *g_aws_http_connection_manager_default_system_vtable_ptr;
+
+#endif /* AWS_HTTP_CONNECTION_MANAGER_SYSTEM_VTABLE_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_monitor.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_monitor.h
new file mode 100644
index 00000000000..0dee2d84db7
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_monitor.h
@@ -0,0 +1,46 @@
+#ifndef AWS_HTTP_HTTP_MONITOR_H
+#define AWS_HTTP_HTTP_MONITOR_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/connection.h>
+#include <aws/http/http.h>
+
+struct aws_allocator;
+struct aws_crt_statistics_handler;
+
+/*
+ * Needed by tests
+ */
+struct aws_statistics_handler_http_connection_monitor_impl {
+ struct aws_http_connection_monitoring_options options;
+
+ uint64_t throughput_failure_time_ms;
+ uint32_t last_incoming_stream_id;
+ uint32_t last_outgoing_stream_id;
+ uint64_t last_measured_throughput;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Creates a new http connection monitor that regularly checks the connection's throughput and shuts the connection
+ * down if a minimum threshold is not met for a configurable number of seconds.
+ */
+AWS_HTTP_API
+struct aws_crt_statistics_handler *aws_crt_statistics_handler_new_http_connection_monitor(
+ struct aws_allocator *allocator,
+ struct aws_http_connection_monitoring_options *options);
+
+/**
+ * Validates monitoring options to ensure they are sensible
+ */
+AWS_HTTP_API
+bool aws_http_connection_monitoring_options_is_valid(const struct aws_http_connection_monitoring_options *options);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_HTTP_MONITOR_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_connection.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_connection.h
new file mode 100644
index 00000000000..86a5124eaf0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_connection.h
@@ -0,0 +1,201 @@
+#ifndef AWS_HTTP_H1_CONNECTION_H
+#define AWS_HTTP_H1_CONNECTION_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/mutex.h>
+#include <aws/http/private/connection_impl.h>
+#include <aws/http/private/h1_encoder.h>
+#include <aws/http/statistics.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4214) /* nonstandard extension used: bit field types other than int */
+#endif
+
+struct aws_h1_connection {
+ struct aws_http_connection base;
+
+ size_t initial_stream_window_size;
+
+ /* Task responsible for sending data.
+ * As long as there is data available to send, the task will be "active" and repeatedly:
+ * 1) Encode outgoing stream data to an aws_io_message and send it up the channel.
+ * 2) Wait until the aws_io_message's write_complete callback fires.
+ * 3) Reschedule the task to run again.
+ *
+ * `thread_data.is_outgoing_stream_task_active` tells whether the task is "active".
+ *
+ * If there is no data available to write (waiting for user to add more streams or chunks),
+ * then the task stops being active. The task is made active again when the user
+ * adds more outgoing data. */
+ struct aws_channel_task outgoing_stream_task;
+
+ /* Task that removes items from `synced_data` and does their on-thread work.
+ * Runs once and waits until it's scheduled again.
+ * Any function that wants to schedule this task MUST:
+ * - acquire the synced_data.lock
+ * - check whether `synced_data.is_cross_thread_work_scheduled` was true or false.
+ * - set `synced_data.is_cross_thread_work_scheduled = true`
+ * - release synced_data.lock
+ * - ONLY IF `synced_data.is_cross_thread_work_scheduled` CHANGED from false to true:
+ * - then schedule the task
+ */
+ struct aws_channel_task cross_thread_work_task;
+
+ /* Only the event-loop thread may touch this data */
+ struct {
+ /* List of streams being worked on. */
+ struct aws_linked_list stream_list;
+
+ /* Points to the stream whose data is currently being sent.
+ * This stream is ALWAYS in the `stream_list`.
+ * HTTP pipelining is supported, so once the stream is completely written
+ * we'll start working on the next stream in the list */
+ struct aws_h1_stream *outgoing_stream;
+
+ /* Points to the stream being decoded.
+ * This stream is ALWAYS in the `stream_list`. */
+ struct aws_h1_stream *incoming_stream;
+ struct aws_h1_decoder *incoming_stream_decoder;
+
+ /* Used to encode requests and responses */
+ struct aws_h1_encoder encoder;
+
+ /**
+ * All aws_io_messages arriving in the read direction are queued here before processing.
+ * This allows the connection to receive more data than the current HTTP-stream might allow,
+ * and process the data later when HTTP-stream's window opens or the next stream begins.
+ *
+ * The `aws_io_message.copy_mark` is used to track progress on partially processed messages.
+ * `pending_bytes` is the sum of all unprocessed bytes across all queued messages.
+ * `capacity` is the limit for how many unprocessed bytes we'd like in the queue.
+ */
+ struct {
+ struct aws_linked_list messages;
+ size_t pending_bytes;
+ size_t capacity;
+ } read_buffer;
+
+ /**
+ * The connection's current window size.
+ * We use this variable, instead of the existing `aws_channel_slot.window_size`,
+ * because that variable is not updated immediately; the channel uses a task to update it.
+ * Since we use the difference between current and desired window size when deciding
+ * how much to increment, we need the most up-to-date values possible.
+ */
+ size_t connection_window;
+
+ /* Only used by tests. Sum of window_increments issued by this slot. Resets each time it's queried */
+ size_t recent_window_increments;
+
+ struct aws_crt_statistics_http1_channel stats;
+
+ uint64_t outgoing_stream_timestamp_ns;
+ uint64_t incoming_stream_timestamp_ns;
+
+ /* True when read and/or writing has stopped, whether due to errors or normal channel shutdown. */
+ bool is_reading_stopped : 1;
+ bool is_writing_stopped : 1;
+
+ /* If true, the connection has upgraded to another protocol.
+ * It will pass data to adjacent channel handlers without altering it.
+ * The connection can no longer service request/response streams. */
+ bool has_switched_protocols : 1;
+
+ /* Server-only. Request-handler streams can only be created while this is true. */
+ bool can_create_request_handler_stream : 1;
+
+ /* see `outgoing_stream_task` */
+ bool is_outgoing_stream_task_active : 1;
+
+ bool is_processing_read_messages : 1;
+ } thread_data;
+
+ /* Any thread may touch this data, but the lock must be held */
+ struct {
+ struct aws_mutex lock;
+
+ /* New client streams that have not been moved to `stream_list` yet.
+ * This list is not used on servers. */
+ struct aws_linked_list new_client_stream_list;
+
+ /* If non-zero, then window_update_task is scheduled */
+ size_t window_update_size;
+
+ /* If non-zero, reason to immediately reject new streams. (ex: closing) */
+ int new_stream_error_code;
+
+ /* See `cross_thread_work_task` */
+ bool is_cross_thread_work_task_scheduled : 1;
+
+ /* For checking status from outside the event-loop thread. */
+ bool is_open : 1;
+
+ } synced_data;
+};
+
+/* Allow tests to check current window stats */
+struct aws_h1_window_stats {
+ size_t connection_window;
+ size_t recent_window_increments; /* Resets to 0 each time window stats are queried */
+ size_t buffer_capacity;
+ size_t buffer_pending_bytes;
+ uint64_t stream_window;
+ bool has_incoming_stream;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/* The functions below are exported so they can be accessed from tests. */
+
+AWS_HTTP_API
+struct aws_http_connection *aws_http_connection_new_http1_1_server(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ size_t initial_window_size,
+ const struct aws_http1_connection_options *http1_options);
+
+AWS_HTTP_API
+struct aws_http_connection *aws_http_connection_new_http1_1_client(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ size_t initial_window_size,
+ const struct aws_http1_connection_options *http1_options);
+
+/* Allow tests to check current window stats */
+AWS_HTTP_API
+struct aws_h1_window_stats aws_h1_connection_window_stats(struct aws_http_connection *connection_base);
+
+AWS_EXTERN_C_END
+
+/* DO NOT export functions below. They're only used by other .c files in this library */
+
+/* TODO: introduce naming conventions for private header functions */
+
+void aws_h1_connection_lock_synced_data(struct aws_h1_connection *connection);
+void aws_h1_connection_unlock_synced_data(struct aws_h1_connection *connection);
+
+/**
+ * Try to kick off the outgoing-stream-task.
+ * If task is already active, nothing happens.
+ * If there's nothing to do, the task will immediately stop itself.
+ * Call this whenever the user provides new outgoing data (ex: new stream, new chunk).
+ * MUST be called from the connection's event-loop thread.
+ */
+void aws_h1_connection_try_write_outgoing_stream(struct aws_h1_connection *connection);
+
+/**
+ * If any read messages are queued, and the downstream window is non-zero,
+ * process data and send it downstream. Then calculate the connection's
+ * desired window size and increment it if necessary.
+ *
+ * During normal operations "downstream" means the current incoming stream.
+ * If the connection has switched protocols "downstream" means the next
+ * channel handler in the read direction.
+ */
+void aws_h1_connection_try_process_read_messages(struct aws_h1_connection *connection);
+
+#endif /* AWS_HTTP_H1_CONNECTION_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_decoder.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_decoder.h
new file mode 100644
index 00000000000..eaf8956cdd3
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_decoder.h
@@ -0,0 +1,90 @@
+#ifndef AWS_HTTP_H1_DECODER_H
+#define AWS_HTTP_H1_DECODER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/http_impl.h>
+#include <aws/http/private/request_response_impl.h>
+
+struct aws_h1_decoded_header {
+ /* Name of the header. If the type is `AWS_HTTP_HEADER_NAME_UNKNOWN` then `name_data` must be parsed manually. */
+ enum aws_http_header_name name;
+
+ /* Raw buffer storing the header's name. */
+ struct aws_byte_cursor name_data;
+
+ /* Raw buffer storing the header's value. */
+ struct aws_byte_cursor value_data;
+
+ /* Raw buffer storing the entire header. */
+ struct aws_byte_cursor data;
+};
+
+struct aws_h1_decoder_vtable {
+ /**
+ * Called from `aws_h*_decode` when an http header has been received.
+ * All pointers are strictly *read only*; any data that needs to persist must be copied out into user-owned memory.
+ */
+ int (*on_header)(const struct aws_h1_decoded_header *header, void *user_data);
+
+ /**
+ * Called from `aws_h1_decode` when a portion of the http body has been received.
+ * `finished` is true if this is the last section of the http body, and false if more body data is yet to be
+ * received. All pointers are strictly *read only*; any data that needs to persist must be copied out into
+ * user-owned memory.
+ */
+ int (*on_body)(const struct aws_byte_cursor *data, bool finished, void *user_data);
+
+ /* Only needed for requests, can be NULL for responses. */
+ int (*on_request)(
+ enum aws_http_method method_enum,
+ const struct aws_byte_cursor *method_str,
+ const struct aws_byte_cursor *uri,
+ void *user_data);
+
+ /* Only needed for responses, can be NULL for requests. */
+ int (*on_response)(int status_code, void *user_data);
+
+ int (*on_done)(void *user_data);
+};
+
+/**
+ * Structure used to initialize an `aws_h1_decoder`.
+ */
+struct aws_h1_decoder_params {
+ struct aws_allocator *alloc;
+ size_t scratch_space_initial_size;
+ /* Set false if decoding responses */
+ bool is_decoding_requests;
+ void *user_data;
+ struct aws_h1_decoder_vtable vtable;
+};
+
+struct aws_h1_decoder;
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API struct aws_h1_decoder *aws_h1_decoder_new(struct aws_h1_decoder_params *params);
+AWS_HTTP_API void aws_h1_decoder_destroy(struct aws_h1_decoder *decoder);
+AWS_HTTP_API int aws_h1_decode(struct aws_h1_decoder *decoder, struct aws_byte_cursor *data);
+
+AWS_HTTP_API void aws_h1_decoder_set_logging_id(struct aws_h1_decoder *decoder, const void *id);
+AWS_HTTP_API void aws_h1_decoder_set_body_headers_ignored(struct aws_h1_decoder *decoder, bool body_headers_ignored);
+
+/* RFC-7230 section 4.2 Message Format */
+#define AWS_HTTP_TRANSFER_ENCODING_CHUNKED (1 << 0)
+#define AWS_HTTP_TRANSFER_ENCODING_GZIP (1 << 1)
+#define AWS_HTTP_TRANSFER_ENCODING_DEFLATE (1 << 2)
+#define AWS_HTTP_TRANSFER_ENCODING_DEPRECATED_COMPRESS (1 << 3)
+AWS_HTTP_API int aws_h1_decoder_get_encoding_flags(const struct aws_h1_decoder *decoder);
+
+AWS_HTTP_API uint64_t aws_h1_decoder_get_content_length(const struct aws_h1_decoder *decoder);
+AWS_HTTP_API bool aws_h1_decoder_get_body_headers_ignored(const struct aws_h1_decoder *decoder);
+AWS_HTTP_API enum aws_http_header_block aws_h1_decoder_get_header_block(const struct aws_h1_decoder *decoder);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_H1_DECODER_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_encoder.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_encoder.h
new file mode 100644
index 00000000000..11b4965c0da
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_encoder.h
@@ -0,0 +1,140 @@
+#ifndef AWS_HTTP_H1_ENCODER_H
+#define AWS_HTTP_H1_ENCODER_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/http_impl.h>
+#include <aws/http/private/request_response_impl.h>
+
+struct aws_h1_chunk {
+ struct aws_allocator *allocator;
+ struct aws_input_stream *data;
+ uint64_t data_size;
+ aws_http1_stream_write_chunk_complete_fn *on_complete;
+ void *user_data;
+ struct aws_linked_list_node node;
+ /* Buffer containing pre-encoded start line: chunk-size [chunk-ext] CRLF */
+ struct aws_byte_buf chunk_line;
+};
+
+struct aws_h1_trailer {
+ struct aws_allocator *allocator;
+ struct aws_byte_buf trailer_data;
+};
+
+/**
+ * Message to be submitted to encoder.
+ * Contains data necessary for encoder to write an outgoing request or response.
+ */
+struct aws_h1_encoder_message {
+ /* Upon creation, the "head" (everything preceding body) is buffered here. */
+ struct aws_byte_buf outgoing_head_buf;
+ /* Single stream used for unchunked body */
+ struct aws_input_stream *body;
+
+ /* Pointer to list of `struct aws_h1_chunk`, used for chunked encoding.
+ * List is owned by aws_h1_stream.
+ * Encoder completes/frees/pops front chunk when it's done sending.
+ * If list goes empty, encoder waits for more chunks to arrive.
+ * A chunk with data_size=0 means "final chunk" */
+ struct aws_linked_list *pending_chunk_list;
+
+ /* Pointer to chunked_trailer, used for chunked_trailer. */
+ struct aws_h1_trailer *trailer;
+
+ /* If non-zero, length of unchunked body to send */
+ uint64_t content_length;
+ bool has_connection_close_header;
+ bool has_chunked_encoding_header;
+};
+
+enum aws_h1_encoder_state {
+ AWS_H1_ENCODER_STATE_INIT,
+ AWS_H1_ENCODER_STATE_HEAD,
+ AWS_H1_ENCODER_STATE_UNCHUNKED_BODY,
+ AWS_H1_ENCODER_STATE_CHUNK_NEXT,
+ AWS_H1_ENCODER_STATE_CHUNK_LINE,
+ AWS_H1_ENCODER_STATE_CHUNK_BODY,
+ AWS_H1_ENCODER_STATE_CHUNK_END,
+ AWS_H1_ENCODER_STATE_CHUNK_TRAILER,
+ AWS_H1_ENCODER_STATE_DONE,
+};
+
+struct aws_h1_encoder {
+ struct aws_allocator *allocator;
+
+ enum aws_h1_encoder_state state;
+ /* Current message being encoded */
+ struct aws_h1_encoder_message *message;
+ /* Used by some states to track progress. Reset to 0 whenever state changes */
+ uint64_t progress_bytes;
+ /* Current chunk */
+ struct aws_h1_chunk *current_chunk;
+ /* Number of chunks sent, just used for logging */
+ size_t chunk_count;
+ /* Encoder logs with this stream ptr as the ID, and passes this ptr to the chunk_complete callback */
+ struct aws_http_stream *current_stream;
+};
+
+struct aws_h1_chunk *aws_h1_chunk_new(struct aws_allocator *allocator, const struct aws_http1_chunk_options *options);
+struct aws_h1_trailer *aws_h1_trailer_new(
+ struct aws_allocator *allocator,
+ const struct aws_http_headers *trailing_headers);
+
+void aws_h1_trailer_destroy(struct aws_h1_trailer *trailer);
+
+/* Just destroy the chunk (don't fire callback) */
+void aws_h1_chunk_destroy(struct aws_h1_chunk *chunk);
+
+/* Destroy chunk and fire its completion callback */
+void aws_h1_chunk_complete_and_destroy(struct aws_h1_chunk *chunk, struct aws_http_stream *http_stream, int error_code);
+
+int aws_chunk_line_from_options(struct aws_http1_chunk_options *options, struct aws_byte_buf *chunk_line);
+
+AWS_EXTERN_C_BEGIN
+
+/* Validate request and cache any info the encoder will need later in the "encoder message". */
+AWS_HTTP_API
+int aws_h1_encoder_message_init_from_request(
+ struct aws_h1_encoder_message *message,
+ struct aws_allocator *allocator,
+ const struct aws_http_message *request,
+ struct aws_linked_list *pending_chunk_list);
+
+int aws_h1_encoder_message_init_from_response(
+ struct aws_h1_encoder_message *message,
+ struct aws_allocator *allocator,
+ const struct aws_http_message *response,
+ bool body_headers_ignored,
+ struct aws_linked_list *pending_chunk_list);
+
+AWS_HTTP_API
+void aws_h1_encoder_message_clean_up(struct aws_h1_encoder_message *message);
+
+AWS_HTTP_API
+void aws_h1_encoder_init(struct aws_h1_encoder *encoder, struct aws_allocator *allocator);
+
+AWS_HTTP_API
+void aws_h1_encoder_clean_up(struct aws_h1_encoder *encoder);
+
+AWS_HTTP_API
+int aws_h1_encoder_start_message(
+ struct aws_h1_encoder *encoder,
+ struct aws_h1_encoder_message *message,
+ struct aws_http_stream *stream);
+
+AWS_HTTP_API
+int aws_h1_encoder_process(struct aws_h1_encoder *encoder, struct aws_byte_buf *out_buf);
+
+AWS_HTTP_API
+bool aws_h1_encoder_is_message_in_progress(const struct aws_h1_encoder *encoder);
+
+/* Return true if the encoder is stuck waiting for more chunks to be added to the current message */
+AWS_HTTP_API
+bool aws_h1_encoder_is_waiting_for_chunks(const struct aws_h1_encoder *encoder);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_H1_ENCODER_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_stream.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_stream.h
new file mode 100644
index 00000000000..df1446ec9b3
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_stream.h
@@ -0,0 +1,123 @@
+#ifndef AWS_HTTP_H1_STREAM_H
+#define AWS_HTTP_H1_STREAM_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/h1_encoder.h>
+#include <aws/http/private/http_impl.h>
+#include <aws/http/private/request_response_impl.h>
+#include <aws/io/channel.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4214) /* nonstandard extension used: bit field types other than int */
+#endif
+
+/* Simple view of stream's state.
+ * Used to determine whether it's safe for a user to call functions that alter state. */
+enum aws_h1_stream_api_state {
+ AWS_H1_STREAM_API_STATE_INIT,
+ AWS_H1_STREAM_API_STATE_ACTIVE,
+ AWS_H1_STREAM_API_STATE_COMPLETE,
+};
+
+struct aws_h1_stream {
+ struct aws_http_stream base;
+
+ struct aws_linked_list_node node;
+
+ /* Task that removes items from `synced_data` and does their on-thread work.
+ * Runs once and waits until it's scheduled again.
+ * Any function that wants to schedule this task MUST:
+ * - acquire the synced_data.lock
+ * - check whether `synced_data.is_cross_thread_work_scheduled` was true or false.
+ * - set `synced_data.is_cross_thread_work_scheduled = true`
+ * - release synced_data.lock
+ * - ONLY IF `synced_data.is_cross_thread_work_scheduled` CHANGED from false to true:
+ * - increment the stream's refcount, to keep stream alive until task runs
+ * - schedule the task
+ */
+ struct aws_channel_task cross_thread_work_task;
+
+ /* Message (derived from outgoing request or response) to be submitted to encoder */
+ struct aws_h1_encoder_message encoder_message;
+
+ bool is_outgoing_message_done;
+
+ bool is_incoming_message_done;
+ bool is_incoming_head_done;
+
+ /* If true, this is the last stream the connection should process.
+ * See RFC-7230 Section 6: Connection Management. */
+ bool is_final_stream;
+
+ /* Buffer for incoming data that needs to stick around. */
+ struct aws_byte_buf incoming_storage_buf;
+
+ struct {
+ /* TODO: move most other members in here */
+
+ /* List of `struct aws_h1_chunk`, used for chunked encoding.
+ * Encoder completes/frees/pops front chunk when it's done sending. */
+ struct aws_linked_list pending_chunk_list;
+
+ struct aws_h1_encoder_message message;
+
+ /* Size of stream's flow-control window.
+ * Only body data (not headers, etc) counts against the stream's flow-control window. */
+ uint64_t stream_window;
+
+ /* Whether a "request handler" stream has a response to send.
+ * Has mirror variable in synced_data */
+ bool has_outgoing_response : 1;
+ } thread_data;
+
+ /* Any thread may touch this data, but the connection's lock must be held.
+ * Sharing a lock is fine because it's rare for an HTTP/1 connection
+ * to have more than one stream at a time. */
+ struct {
+ /* List of `struct aws_h1_chunk` which have been submitted by user,
+ * but haven't yet moved to encoder_message.pending_chunk_list where the encoder will find them. */
+ struct aws_linked_list pending_chunk_list;
+
+ /* trailing headers which have been submitted by user,
+ * but haven't yet moved to encoder_message where the encoder will find them. */
+ struct aws_h1_trailer *pending_trailer;
+
+ enum aws_h1_stream_api_state api_state;
+
+ /* Sum of all aws_http_stream_update_window() calls that haven't yet moved to thread_data.stream_window */
+ uint64_t pending_window_update;
+
+ /* See `cross_thread_work_task` */
+ bool is_cross_thread_work_task_scheduled : 1;
+
+ /* Whether a "request handler" stream has a response to send.
+ * Has mirror variable in thread_data */
+ bool has_outgoing_response : 1;
+
+ /* Whether the outgoing message is using chunked encoding */
+ bool using_chunked_encoding : 1;
+
+ /* Whether the final 0 length chunk has already been sent */
+ bool has_final_chunk : 1;
+
+ /* Whether the chunked trailer has already been sent */
+ bool has_added_trailer : 1;
+ } synced_data;
+};
+
+/* DO NOT export functions below. They're only used by other .c files in this library */
+
+struct aws_h1_stream *aws_h1_stream_new_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+
+struct aws_h1_stream *aws_h1_stream_new_request_handler(const struct aws_http_request_handler_options *options);
+
+int aws_h1_stream_activate(struct aws_http_stream *stream);
+
+int aws_h1_stream_send_response(struct aws_h1_stream *stream, struct aws_http_message *response);
+
+#endif /* AWS_HTTP_H1_STREAM_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_connection.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_connection.h
new file mode 100644
index 00000000000..6d42b831602
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_connection.h
@@ -0,0 +1,289 @@
+#ifndef AWS_HTTP_H2_CONNECTION_H
+#define AWS_HTTP_H2_CONNECTION_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/atomics.h>
+#include <aws/common/fifo_cache.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/mutex.h>
+
+#include <aws/http/private/connection_impl.h>
+#include <aws/http/private/h2_frames.h>
+#include <aws/http/statistics.h>
+
+struct aws_h2_decoder;
+struct aws_h2_stream;
+
+struct aws_h2_connection {
+    struct aws_http_connection base;
+
+    /* User callbacks for connection-level HTTP/2 events */
+    aws_http2_on_goaway_received_fn *on_goaway_received;
+    aws_http2_on_remote_settings_change_fn *on_remote_settings_change;
+
+    /* Tasks scheduled on the channel's event loop */
+    struct aws_channel_task cross_thread_work_task;
+    struct aws_channel_task outgoing_frames_task;
+
+    /* Whether the user manages the connection-level flow-control window manually */
+    bool conn_manual_window_management;
+
+    /* Only the event-loop thread may touch this data */
+    struct {
+        struct aws_h2_decoder *decoder;
+        struct aws_h2_frame_encoder encoder;
+
+        /* True when reading/writing has stopped, whether due to errors or normal channel shutdown. */
+        bool is_reading_stopped;
+        bool is_writing_stopped;
+
+        bool is_outgoing_frames_task_active;
+
+        /* Settings received from peer, which restricts the message to send */
+        uint32_t settings_peer[AWS_HTTP2_SETTINGS_END_RANGE];
+        /* Local settings to send/sent to peer, which affects the decoding */
+        uint32_t settings_self[AWS_HTTP2_SETTINGS_END_RANGE];
+
+        /* List using aws_h2_pending_settings.node
+         * Contains settings waiting to be ACKed by peer and applied */
+        struct aws_linked_list pending_settings_queue;
+
+        /* List using aws_h2_pending_ping.node
+         * Pings waiting to be ACKed by peer */
+        struct aws_linked_list pending_ping_queue;
+
+        /* Most recent stream-id that was initiated by peer */
+        uint32_t latest_peer_initiated_stream_id;
+
+        /* Maps stream-id to aws_h2_stream*.
+         * Contains all streams in the open, reserved, and half-closed states (terms from RFC-7540 5.1).
+         * Once a stream enters closed state, it is removed from this map. */
+        struct aws_hash_table active_streams_map;
+
+        /* List using aws_h2_stream.node.
+         * Contains all streams with DATA frames to send.
+         * Any stream in this list is also in the active_streams_map. */
+        struct aws_linked_list outgoing_streams_list;
+
+        /* List using aws_h2_stream.node.
+         * Contains all streams with DATA frames to send, and cannot send now due to flow control.
+         * Waiting for WINDOW_UPDATE to set them free */
+        struct aws_linked_list stalled_window_streams_list;
+
+        /* List using aws_h2_stream.node.
+         * Contains all streams that are open, but are only sending data when notified, rather than polling
+         * for it (e.g. event streams)
+         * Streams are moved to the outgoing_streams_list until they send pending data, then are moved back
+         * to this list to sleep until more data comes in
+         */
+        struct aws_linked_list waiting_streams_list;
+
+        /* List using aws_h2_frame.node.
+         * Queues all frames (except DATA frames) for connection to send.
+         * When queue is empty, then we send DATA frames from the outgoing_streams_list */
+        struct aws_linked_list outgoing_frames_queue;
+
+        /* FIFO cache for closed stream, key: stream-id, value: aws_h2_stream_closed_when.
+         * Contains data about streams that were recently closed.
+         * The oldest entry will be removed if the cache is full */
+        struct aws_cache *closed_streams;
+
+        /* Flow-control of connection from peer. Indicating the buffer capacity of our peer.
+         * Reduce the space after sending a flow-controlled frame. Increment after receiving WINDOW_UPDATE for
+         * connection */
+        size_t window_size_peer;
+
+        /* Flow-control of connection for this side.
+         * Reduce the space after receiving a flow-controlled frame. Increment after sending WINDOW_UPDATE for
+         * connection */
+        size_t window_size_self;
+
+        /* Highest self-initiated stream-id that peer might have processed.
+         * Defaults to max stream-id, may be lowered when GOAWAY frame received. */
+        uint32_t goaway_received_last_stream_id;
+
+        /* Last-stream-id sent in most recent GOAWAY frame. Defaults to max stream-id. */
+        uint32_t goaway_sent_last_stream_id;
+
+        /* Frame we are encoding now. NULL if we are not encoding anything. */
+        struct aws_h2_frame *current_outgoing_frame;
+
+        /* Pointer to initial pending settings. If ACKed by peer, it will be NULL. */
+        struct aws_h2_pending_settings *init_pending_settings;
+
+        /* Cached channel shutdown values.
+         * If possible, we delay shutdown-in-the-write-dir until GOAWAY is written. */
+        int channel_shutdown_error_code;
+        bool channel_shutdown_immediately;
+        bool channel_shutdown_waiting_for_goaway_to_be_written;
+
+        /* TODO: Consider adding stream monitor */
+        struct aws_crt_statistics_http2_channel stats;
+
+        /* Timestamp when connection has data to send, which is when there is an active stream with body to send */
+        uint64_t outgoing_timestamp_ns;
+        /* Timestamp when connection has data to receive, which is when there is an active stream */
+        uint64_t incoming_timestamp_ns;
+    } thread_data;
+
+    /* Any thread may touch this data, but the lock must be held (unless it's an atomic) */
+    struct {
+        struct aws_mutex lock;
+
+        /* New `aws_h2_stream *` that haven't moved to `thread_data` yet */
+        struct aws_linked_list pending_stream_list;
+
+        /* New `aws_h2_frame *`, connection control frames created by user that haven't moved to `thread_data` yet */
+        struct aws_linked_list pending_frame_list;
+
+        /* New `aws_h2_pending_settings *` created by user that haven't moved to `thread_data` yet */
+        struct aws_linked_list pending_settings_list;
+
+        /* New `aws_h2_pending_ping *` created by user that haven't moved to `thread_data` yet */
+        struct aws_linked_list pending_ping_list;
+
+        /* New `aws_h2_pending_goaway *` created by user that haven't sent yet */
+        struct aws_linked_list pending_goaway_list;
+
+        bool is_cross_thread_work_task_scheduled;
+
+        /* The window_update value for `thread_data.window_size_self` that haven't applied yet */
+        size_t window_update_size;
+
+        /* For checking status from outside the event-loop thread. */
+        bool is_open;
+
+        /* If non-zero, reason to immediately reject new streams. (ex: closing) */
+        int new_stream_error_code;
+
+        /* Last-stream-id sent in most recent GOAWAY frame. Defaults to AWS_H2_STREAM_ID_MAX + 1, which indicates no
+         * GOAWAY has been sent so far. */
+        uint32_t goaway_sent_last_stream_id;
+        /* aws_http2_error_code sent in most recent GOAWAY frame. Defaults to 0; check goaway_sent_last_stream_id to
+         * tell whether any GOAWAY has been sent. */
+        uint32_t goaway_sent_http2_error_code;
+
+        /* Last-stream-id received in most recent GOAWAY frame. Defaults to AWS_H2_STREAM_ID_MAX + 1, which indicates
+         * no GOAWAY has been received so far. */
+        uint32_t goaway_received_last_stream_id;
+        /* aws_http2_error_code received in most recent GOAWAY frame. Defaults to 0; check
+         * goaway_received_last_stream_id to tell whether any GOAWAY has been received. */
+        uint32_t goaway_received_http2_error_code;
+
+        /* For checking settings received from peer from outside the event-loop thread. */
+        uint32_t settings_peer[AWS_HTTP2_SETTINGS_END_RANGE];
+        /* For checking local settings to send/sent to peer from outside the event-loop thread. */
+        uint32_t settings_self[AWS_HTTP2_SETTINGS_END_RANGE];
+    } synced_data;
+};
+
+/* A user-submitted SETTINGS change, queued until the peer ACKs it (see pending_settings_queue/list) */
+struct aws_h2_pending_settings {
+    struct aws_http2_setting *settings_array;
+    size_t num_settings;
+    struct aws_linked_list_node node;
+    /* user callback and the user_data passed to it */
+    void *user_data;
+    aws_http2_on_change_settings_complete_fn *on_completed;
+};
+
+/* A user-submitted PING, queued until the peer ACKs it (see pending_ping_queue/list) */
+struct aws_h2_pending_ping {
+    uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE];
+    /* For calculating round-trip time */
+    uint64_t started_time;
+    struct aws_linked_list_node node;
+    /* user callback and the user_data passed to it */
+    void *user_data;
+    aws_http2_on_ping_complete_fn *on_completed;
+};
+
+/* A user-requested GOAWAY that hasn't been sent yet (see synced_data.pending_goaway_list) */
+struct aws_h2_pending_goaway {
+    bool allow_more_streams;
+    uint32_t http2_error;
+    struct aws_byte_cursor debug_data;
+    struct aws_linked_list_node node;
+};
+
+/**
+ * The action which caused the stream to close.
+ */
+enum aws_h2_stream_closed_when {
+    AWS_H2_STREAM_CLOSED_UNKNOWN,
+    AWS_H2_STREAM_CLOSED_WHEN_BOTH_SIDES_END_STREAM, /* both sides sent END_STREAM */
+    AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_RECEIVED, /* RST_STREAM received from peer */
+    AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_SENT, /* RST_STREAM sent by this side */
+};
+
+enum aws_h2_data_encode_status {
+    AWS_H2_DATA_ENCODE_COMPLETE, /* body fully encoded; no DATA left to send */
+    AWS_H2_DATA_ENCODE_ONGOING, /* more body remains to be encoded */
+    AWS_H2_DATA_ENCODE_ONGOING_BODY_STREAM_STALLED, /* stalled reading from body stream */
+    AWS_H2_DATA_ENCODE_ONGOING_WAITING_FOR_WRITES, /* waiting for next manual write */
+    AWS_H2_DATA_ENCODE_ONGOING_WINDOW_STALLED, /* stalled due to reduced window size */
+};
+
+/* When window size is too small to fit the possible padding into it, we stop sending data and wait for WINDOW_UPDATE */
+#define AWS_H2_MIN_WINDOW_SIZE (256)
+
+/* Private functions called from tests... */
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API
+struct aws_http_connection *aws_http_connection_new_http2_server(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ const struct aws_http2_connection_options *http2_options);
+
+AWS_HTTP_API
+struct aws_http_connection *aws_http_connection_new_http2_client(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ const struct aws_http2_connection_options *http2_options);
+
+AWS_EXTERN_C_END
+
+/* Private functions called from multiple .c files... */
+
+/**
+ * Enqueue outgoing frame.
+ * Connection takes ownership of frame.
+ * Frames are sent in FIFO order.
+ * Do not enqueue DATA frames, these are sent by other means when the frame queue is empty.
+ */
+void aws_h2_connection_enqueue_outgoing_frame(struct aws_h2_connection *connection, struct aws_h2_frame *frame);
+
+/**
+ * Invoked immediately after a stream enters the CLOSED state.
+ * The connection will remove the stream from its "active" datastructures,
+ * guaranteeing that no further decoder callbacks are invoked on the stream.
+ *
+ * This should NOT be invoked in the case of a "Connection Error",
+ * though a "Stream Error", in which a RST_STREAM is sent and the stream
+ * is closed early, would invoke this.
+ */
+int aws_h2_connection_on_stream_closed(
+ struct aws_h2_connection *connection,
+ struct aws_h2_stream *stream,
+ enum aws_h2_stream_closed_when closed_when,
+ int aws_error_code);
+
+/**
+ * Send RST_STREAM and close a stream reserved via PUSH_PROMISE.
+ */
+int aws_h2_connection_send_rst_and_close_reserved_stream(
+ struct aws_h2_connection *connection,
+ uint32_t stream_id,
+ uint32_t h2_error_code);
+
+/**
+ * Error happens while writing into channel, shutdown the connection. Only called within the eventloop thread
+ */
+void aws_h2_connection_shutdown_due_to_write_err(struct aws_h2_connection *connection, int error_code);
+
+/**
+ * Try to write outgoing frames, if the outgoing-frames-task isn't scheduled, run it immediately.
+ */
+void aws_h2_try_write_outgoing_frames(struct aws_h2_connection *connection);
+
+#endif /* AWS_HTTP_H2_CONNECTION_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_decoder.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_decoder.h
new file mode 100644
index 00000000000..bd8a7199a1e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_decoder.h
@@ -0,0 +1,121 @@
+#ifndef AWS_HTTP_H2_DECODER_H
+#define AWS_HTTP_H2_DECODER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/h2_frames.h>
+#include <aws/http/private/http_impl.h>
+
+/* Decoder design goals:
+ * - Minimize state tracking and verification required by user.
+ * For example, we have _begin()/_i()/_end() callbacks when something happens N times.
+ * The _begin() and _end() callbacks tell the user when to transition states.
+ * Without them the user needs to be like, oh, I was doing X but now I'm doing Y,
+ * so I guess I need to end X and start Y.
+
+ * - A callback should result in 1 distinct action.
+ * For example, we have distinct callbacks for `on_ping()` and `on_ping_ack()`.
+ * We COULD have had just one `on_ping(bool ack)` callback, but since user must
+ * take two completely different actions based on the ACK, we opted for two callbacks.
+ */
+
+/* Return a failed aws_h2err from any callback to stop the decoder and cause a Connection Error */
+struct aws_h2_decoder_vtable {
+    /* For HEADERS header-block: _begin() is called, then 0+ _i() calls, then _end().
+     * No other decoder callbacks will occur in this time.
+     * If something is malformed, no further _i() calls occur, and it is reported in _end() */
+    struct aws_h2err (*on_headers_begin)(uint32_t stream_id, void *userdata);
+    struct aws_h2err (*on_headers_i)(
+        uint32_t stream_id,
+        const struct aws_http_header *header,
+        enum aws_http_header_name name_enum,
+        enum aws_http_header_block block_type,
+        void *userdata);
+    struct aws_h2err (
+        *on_headers_end)(uint32_t stream_id, bool malformed, enum aws_http_header_block block_type, void *userdata);
+
+    /* For PUSH_PROMISE header-block: _begin() is called, then 0+ _i() calls, then _end().
+     * No other decoder callbacks will occur in this time.
+     * If something is malformed, no further _i() calls occur, and it is reported in _end() */
+    struct aws_h2err (*on_push_promise_begin)(uint32_t stream_id, uint32_t promised_stream_id, void *userdata);
+    struct aws_h2err (*on_push_promise_i)(
+        uint32_t stream_id,
+        const struct aws_http_header *header,
+        enum aws_http_header_name name_enum,
+        void *userdata);
+    struct aws_h2err (*on_push_promise_end)(uint32_t stream_id, bool malformed, void *userdata);
+
+    /* For DATA frame: _begin() is called, then 0+ _i() calls, then _end().
+     * No other decoder callbacks will occur in this time */
+    struct aws_h2err (*on_data_begin)(
+        uint32_t stream_id,
+        uint32_t payload_len, /* Whole payload length including padding and padding length */
+        uint32_t total_padding_bytes, /* The length of padding and the byte for padding length */
+        bool end_stream,
+        void *userdata);
+    struct aws_h2err (*on_data_i)(uint32_t stream_id, struct aws_byte_cursor data, void *userdata);
+    struct aws_h2err (*on_data_end)(uint32_t stream_id, void *userdata);
+
+    /* Called at end of DATA frame containing the END_STREAM flag.
+     * OR called at end of header-block which began with HEADERS frame containing the END_STREAM flag */
+    struct aws_h2err (*on_end_stream)(uint32_t stream_id, void *userdata);
+
+    /* Called once for RST_STREAM frame */
+    struct aws_h2err (*on_rst_stream)(uint32_t stream_id, uint32_t error_code, void *userdata);
+
+    /* Called once for PING frame with ACK flag set */
+    struct aws_h2err (*on_ping_ack)(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata);
+
+    /* Called once for PING frame (no ACK flag set) */
+    struct aws_h2err (*on_ping)(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata);
+
+    /* Called once for SETTINGS frame with ACK flag */
+    struct aws_h2err (*on_settings_ack)(void *userdata);
+
+    /* Called once for SETTINGS frame, without ACK flag */
+    struct aws_h2err (
+        *on_settings)(const struct aws_http2_setting *settings_array, size_t num_settings, void *userdata);
+
+    /* Called once for GOAWAY frame */
+    struct aws_h2err (
+        *on_goaway)(uint32_t last_stream, uint32_t error_code, struct aws_byte_cursor debug_data, void *userdata);
+
+    /* Called once for WINDOW_UPDATE frame */
+    struct aws_h2err (*on_window_update)(uint32_t stream_id, uint32_t window_size_increment, void *userdata);
+};
+
+/**
+ * Structure used to initialize an `aws_h2_decoder`.
+ */
+struct aws_h2_decoder_params {
+    struct aws_allocator *alloc;
+    /* Callbacks invoked as frames are decoded */
+    const struct aws_h2_decoder_vtable *vtable;
+    /* Passed to each vtable callback */
+    void *userdata;
+    /* Identifier used in log messages */
+    const void *logging_id;
+    bool is_server;
+
+    /* If true, do not expect the connection preface and immediately accept any frame type.
+     * Only set this when testing the decoder itself */
+    bool skip_connection_preface;
+};
+
+struct aws_h2_decoder;
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API struct aws_h2_decoder *aws_h2_decoder_new(struct aws_h2_decoder_params *params);
+AWS_HTTP_API void aws_h2_decoder_destroy(struct aws_h2_decoder *decoder);
+
+/* If failed aws_h2err returned, it is a Connection Error */
+AWS_HTTP_API struct aws_h2err aws_h2_decode(struct aws_h2_decoder *decoder, struct aws_byte_cursor *data);
+
+AWS_HTTP_API void aws_h2_decoder_set_setting_header_table_size(struct aws_h2_decoder *decoder, uint32_t data);
+AWS_HTTP_API void aws_h2_decoder_set_setting_enable_push(struct aws_h2_decoder *decoder, uint32_t data);
+AWS_HTTP_API void aws_h2_decoder_set_setting_max_frame_size(struct aws_h2_decoder *decoder, uint32_t data);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_H2_DECODER_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_frames.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_frames.h
new file mode 100644
index 00000000000..23c1daf1ec0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_frames.h
@@ -0,0 +1,299 @@
+#ifndef AWS_HTTP_H2_FRAMES_H
+#define AWS_HTTP_H2_FRAMES_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/connection.h>
+#include <aws/http/private/hpack.h>
+#include <aws/http/request_response.h>
+
+#include <aws/common/byte_buf.h>
+
+/* Ids for each frame type (RFC-7540 6) */
+enum aws_h2_frame_type {
+    AWS_H2_FRAME_T_DATA = 0x00,
+    AWS_H2_FRAME_T_HEADERS = 0x01,
+    AWS_H2_FRAME_T_PRIORITY = 0x02,
+    AWS_H2_FRAME_T_RST_STREAM = 0x03,
+    AWS_H2_FRAME_T_SETTINGS = 0x04,
+    AWS_H2_FRAME_T_PUSH_PROMISE = 0x05,
+    AWS_H2_FRAME_T_PING = 0x06,
+    AWS_H2_FRAME_T_GOAWAY = 0x07,
+    AWS_H2_FRAME_T_WINDOW_UPDATE = 0x08,
+    AWS_H2_FRAME_T_CONTINUATION = 0x09,
+    AWS_H2_FRAME_T_UNKNOWN, /* a frame whose type id is not recognized */
+    AWS_H2_FRAME_TYPE_COUNT, /* not a frame type; the number of entries above */
+};
+
+/* Represents flags that may be set on a frame (RFC-7540 6) */
+enum aws_h2_frame_flag {
+    /* Note: ACK and END_STREAM deliberately share the value 0x01;
+     * they apply to different frame types (ACK: SETTINGS/PING, END_STREAM: DATA/HEADERS) */
+    AWS_H2_FRAME_F_ACK = 0x01,
+    AWS_H2_FRAME_F_END_STREAM = 0x01,
+    AWS_H2_FRAME_F_END_HEADERS = 0x04,
+    AWS_H2_FRAME_F_PADDED = 0x08,
+    AWS_H2_FRAME_F_PRIORITY = 0x20,
+};
+
+/* Pairs the AWS_ERROR_* to show our API user,
+ * along with the AWS_HTTP2_ERR_* that should
+ * be sent to the peer via RST_STREAM or GOAWAY.
+ *
+ * Used in place of normal error handling in functions that may result
+ * in an HTTP/2 Connection Error or Stream Error.
+ */
+struct aws_h2err {
+    /* HTTP/2 error-code to send to the peer via RST_STREAM or GOAWAY */
+    enum aws_http2_error_code h2_code;
+    /* AWS_ERROR_* to report to our API user */
+    int aws_code;
+};
+
+#define AWS_H2ERR_SUCCESS \
+ (struct aws_h2err) { .h2_code = 0, .aws_code = 0 }
+
+#define AWS_H2_PAYLOAD_MAX (0x00FFFFFF) /* must fit in 3 bytes */
+#define AWS_H2_WINDOW_UPDATE_MAX (0x7FFFFFFF) /* cannot use high bit */
+#define AWS_H2_STREAM_ID_MAX (0x7FFFFFFF) /* cannot use high bit */
+#define AWS_H2_FRAME_PREFIX_SIZE (9)
+#define AWS_H2_INIT_WINDOW_SIZE (65535) /* Defined initial window size */
+
+/* Legal min(inclusive) and max(inclusive) for each setting */
+extern const uint32_t aws_h2_settings_bounds[AWS_HTTP2_SETTINGS_END_RANGE][2];
+
+/* Initial values for settings RFC-7540 6.5.2 */
+AWS_HTTP_API
+extern const uint32_t aws_h2_settings_initial[AWS_HTTP2_SETTINGS_END_RANGE];
+
+/* This magic string must be the very first thing a client sends to the server.
+ * See RFC-7540 3.5 - HTTP/2 Connection Preface.
+ * Exported for tests */
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_h2_connection_preface_client_string;
+
+/**
+ * Present in all frames that may have set AWS_H2_FRAME_F_PRIORITY
+ *
+ * Encoded as:
+ * +-+-------------------------------------------------------------+
+ * |E| Stream Dependency (31) |
+ * +-+-------------+-----------------------------------------------+
+ * | Weight (8) |
+ * +-+-------------+
+ */
+struct aws_h2_frame_priority_settings {
+    uint32_t stream_dependency; /* "Stream Dependency" field (31 bits) */
+    bool stream_dependency_exclusive; /* the "E" bit */
+    uint8_t weight; /* "Weight" field (8 bits) */
+};
+
+/**
+ * A frame to be encoded.
+ * (in the case of HEADERS and PUSH_PROMISE, it might turn into multiple frames due to CONTINUATION)
+ */
+struct aws_h2_frame {
+    const struct aws_h2_frame_vtable *vtable;
+    struct aws_allocator *alloc;
+    struct aws_linked_list_node node; /* for the connection's outgoing_frames_queue */
+    enum aws_h2_frame_type type;
+    uint32_t stream_id;
+
+    /* If true, frame will be sent before those with normal priority.
+     * Useful for frames like PING ACK where low latency is important. */
+    bool high_priority;
+};
+
+/* Used to encode a frame */
+/* Used to encode a frame */
+struct aws_h2_frame_encoder {
+    struct aws_allocator *allocator;
+    const void *logging_id;
+    struct aws_hpack_encoder hpack;
+    /* Frame currently being encoded; NULL when not mid-frame */
+    struct aws_h2_frame *current_frame;
+
+    /* Settings for frame encoder, which is based on the settings received from peer */
+    struct {
+        /* the size of the largest frame payload */
+        uint32_t max_frame_size;
+    } settings;
+
+    /* Whether the encoder has encountered an error */
+    bool has_errored;
+};
+
+typedef void aws_h2_frame_destroy_fn(struct aws_h2_frame *frame_base);
+typedef int aws_h2_frame_encode_fn(
+ struct aws_h2_frame *frame_base,
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_byte_buf *output,
+ bool *complete);
+
+struct aws_h2_frame_vtable {
+    aws_h2_frame_destroy_fn *destroy; /* free the frame and its resources */
+    aws_h2_frame_encode_fn *encode; /* encode (part of) the frame into an output buffer */
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API
+const char *aws_h2_frame_type_to_str(enum aws_h2_frame_type type);
+
+AWS_HTTP_API
+const char *aws_http2_error_code_to_str(enum aws_http2_error_code h2_error_code);
+
+/**
+ * Specify which HTTP/2 error-code will be sent to the peer in a GOAWAY or RST_STREAM frame.
+ *
+ * The AWS_ERROR reported to the API user will be AWS_ERROR_HTTP_PROTOCOL_ERROR.
+ */
+AWS_HTTP_API
+struct aws_h2err aws_h2err_from_h2_code(enum aws_http2_error_code h2_error_code);
+
+/**
+ * Specify which AWS_ERROR will be reported to the API user.
+ *
+ * The peer will be sent a GOAWAY or RST_STREAM with the INTERNAL_ERROR HTTP/2 error-code.
+ */
+AWS_HTTP_API
+struct aws_h2err aws_h2err_from_aws_code(int aws_error_code);
+
+AWS_HTTP_API
+struct aws_h2err aws_h2err_from_last_error(void);
+
+AWS_HTTP_API
+bool aws_h2err_success(struct aws_h2err err);
+
+AWS_HTTP_API
+bool aws_h2err_failed(struct aws_h2err err);
+
+/* Raises AWS_ERROR_INVALID_ARGUMENT if stream_id is 0 or exceeds AWS_H2_STREAM_ID_MAX */
+AWS_HTTP_API
+int aws_h2_validate_stream_id(uint32_t stream_id);
+
+/**
+ * The process of encoding a frame looks like:
+ * 1. Create an encoder object on the stack and initialize with aws_h2_frame_encoder_init
+ * 2. Encode the frame using aws_h2_encode_frame()
+ */
+AWS_HTTP_API
+int aws_h2_frame_encoder_init(
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_allocator *allocator,
+ const void *logging_id);
+
+AWS_HTTP_API
+void aws_h2_frame_encoder_clean_up(struct aws_h2_frame_encoder *encoder);
+
+/**
+ * Attempt to encode frame into output buffer.
+ * AWS_OP_ERR is returned if encoder encounters an unrecoverable error.
+ * frame_complete will be set true if the frame finished encoding.
+ *
+ * If frame_complete is false then we MUST call aws_h2_encode_frame() again
+ * with all the same inputs, when we have a fresh buffer (it would be illegal
+ * to encode a different frame).
+ */
+AWS_HTTP_API
+int aws_h2_encode_frame(
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_h2_frame *frame,
+ struct aws_byte_buf *output,
+ bool *frame_complete);
+
+/**
+ * Attempt to encode a DATA frame into the output buffer.
+ * The body_stream will be read into the available space (up to MAX_FRAME_SIZE).
+ * AWS_OP_ERR is returned if encoder encounters an unrecoverable error.
+ * body_complete will be set true if encoder reaches the end of the body_stream.
+ * body_stalled will be true if aws_input_stream_read() stopped early (didn't
+ * complete, though more space was available).
+ *
+ * Each call to this function encodes a complete DATA frame, or nothing at all,
+ * so it's always safe to encode a different frame type or the body of a different stream
+ * after calling this.
+ */
+AWS_HTTP_API
+int aws_h2_encode_data_frame(
+ struct aws_h2_frame_encoder *encoder,
+ uint32_t stream_id,
+ struct aws_input_stream *body_stream,
+ bool body_ends_stream,
+ uint8_t pad_length,
+ int32_t *stream_window_size_peer,
+ size_t *connection_window_size_peer,
+ struct aws_byte_buf *output,
+ bool *body_complete,
+ bool *body_stalled);
+
+AWS_HTTP_API
+void aws_h2_frame_destroy(struct aws_h2_frame *frame);
+
+/**
+ * This frame type may actually end up encoding multiple frames
+ * (HEADERS followed by 0 or more CONTINUATION frames).
+ */
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_headers(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ const struct aws_http_headers *headers,
+ bool end_stream,
+ uint8_t pad_length,
+ const struct aws_h2_frame_priority_settings *optional_priority);
+
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_priority(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ const struct aws_h2_frame_priority_settings *priority);
+
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_rst_stream(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ uint32_t error_code);
+
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_settings(
+ struct aws_allocator *allocator,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ bool ack);
+
+/**
+ * This frame type may actually end up encoding multiple frames
+ * (PUSH_PROMISE followed by 0 or more CONTINUATION frames).
+ */
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_push_promise(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ uint32_t promised_stream_id,
+ const struct aws_http_headers *headers,
+ uint8_t pad_length);
+
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_ping(
+ struct aws_allocator *allocator,
+ bool ack,
+ const uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE]);
+
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_goaway(
+ struct aws_allocator *allocator,
+ uint32_t last_stream_id,
+ uint32_t error_code,
+ struct aws_byte_cursor debug_data);
+
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_window_update(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ uint32_t window_size_increment);
+
+AWS_HTTP_API void aws_h2_frame_encoder_set_setting_header_table_size(
+ struct aws_h2_frame_encoder *encoder,
+ uint32_t data);
+AWS_HTTP_API void aws_h2_frame_encoder_set_setting_max_frame_size(struct aws_h2_frame_encoder *encoder, uint32_t data);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_H2_FRAMES_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_stream.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_stream.h
new file mode 100644
index 00000000000..62de106c3ec
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_stream.h
@@ -0,0 +1,190 @@
+#ifndef AWS_HTTP_H2_STREAM_H
+#define AWS_HTTP_H2_STREAM_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/h2_frames.h>
+#include <aws/http/private/request_response_impl.h>
+
+#include <aws/common/mutex.h>
+#include <aws/io/channel.h>
+
+#include <inttypes.h>
+
+#define AWS_H2_STREAM_LOGF(level, stream, text, ...) \
+ AWS_LOGF_##level( \
+ AWS_LS_HTTP_STREAM, \
+ "id=%" PRIu32 " connection=%p state=%s: " text, \
+ (stream)->base.id, \
+ (void *)(stream)->base.owning_connection, \
+ aws_h2_stream_state_to_str((stream)->thread_data.state), \
+ __VA_ARGS__)
+#define AWS_H2_STREAM_LOG(level, stream, text) AWS_H2_STREAM_LOGF(level, (stream), "%s", (text))
+
+enum aws_h2_stream_state {
+    /* Initial state, before anything sent or received. */
+    AWS_H2_STREAM_STATE_IDLE,
+    /* (server-only) stream-id was reserved via PUSH_PROMISE on another stream,
+     * but HEADERS for this stream have not been sent yet */
+    AWS_H2_STREAM_STATE_RESERVED_LOCAL,
+    /* (client-only) stream-id was reserved via PUSH_PROMISE on another stream,
+     * but HEADERS for this stream have not been received yet */
+    AWS_H2_STREAM_STATE_RESERVED_REMOTE,
+    /* Neither side is done sending their message. */
+    AWS_H2_STREAM_STATE_OPEN,
+    /* This side is done sending message (END_STREAM), but peer is not done. */
+    AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL,
+    /* Peer is done sending message (END_STREAM), but this side is not done */
+    AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE,
+    /* Both sides done sending message (END_STREAM),
+     * or either side has sent RST_STREAM */
+    AWS_H2_STREAM_STATE_CLOSED,
+
+    /* Not an actual state; the number of states above */
+    AWS_H2_STREAM_STATE_COUNT,
+};
+
+/* simplified stream state for API implementation */
+enum aws_h2_stream_api_state {
+    AWS_H2_STREAM_API_STATE_INIT, /* created, not yet activated */
+    AWS_H2_STREAM_API_STATE_ACTIVE,
+    AWS_H2_STREAM_API_STATE_COMPLETE,
+};
+
+/* Indicates the state of the body of the HTTP/2 stream */
+enum aws_h2_stream_body_state {
+    AWS_H2_STREAM_BODY_STATE_NONE, /* Has no body for the HTTP/2 stream */
+    AWS_H2_STREAM_BODY_STATE_WAITING_WRITES, /* Has no active body, but waiting for more to be
+                                                written */
+    AWS_H2_STREAM_BODY_STATE_ONGOING, /* Has active ongoing body */
+};
+
+/* represents a write operation, which will be turned into a data frame */
+struct aws_h2_stream_data_write {
+    struct aws_linked_list_node node;
+    struct aws_input_stream *data_stream; /* the data to send */
+    aws_http2_stream_write_data_complete_fn *on_complete; /* user callback fired when this write completes */
+    void *user_data;
+    bool end_stream; /* whether this write ends the stream (END_STREAM) */
+};
+
+struct aws_h2_stream {
+    struct aws_http_stream base;
+
+    struct aws_linked_list_node node;
+    struct aws_channel_task cross_thread_work_task;
+
+    /* Only the event-loop thread may touch this data */
+    struct {
+        enum aws_h2_stream_state state;
+        int32_t window_size_peer;
+        /* The local window size.
+         * We allow this value exceed the max window size (int64 can hold much more than 0x7FFFFFFF),
+         * We leave it up to the remote peer to detect whether the max window size has been exceeded. */
+        int64_t window_size_self;
+        struct aws_http_message *outgoing_message;
+        /* All queued writes. If the message provides a body stream, it will be first in this list
+         * This list can drain, which results in the stream being put to sleep (moved to waiting_streams_list in
+         * h2_connection). */
+        struct aws_linked_list outgoing_writes; /* list of aws_h2_stream_data_write */
+        bool received_main_headers;
+
+        bool content_length_received;
+        /* Set if incoming message has content-length header */
+        uint64_t incoming_content_length;
+        /* The total length of payload of data frame received */
+        uint64_t incoming_data_length;
+        /* Indicates that the stream is currently in the waiting_streams_list and is
+         * asleep. When the stream needs to be awakened, move it back to the outgoing_streams_list and set this bool
+         * to false */
+        bool waiting_for_writes;
+    } thread_data;
+
+    /* Any thread may touch this data, but the lock must be held (unless it's an atomic) */
+    struct {
+        struct aws_mutex lock;
+
+        bool is_cross_thread_work_task_scheduled;
+
+        /* The window_update value for `thread_data.window_size_self` that haven't applied yet */
+        size_t window_update_size;
+
+        /* The combined aws_http2_error_code user wanted to send to remote peer via rst_stream and internal aws error
+         * code we want to inform user about. */
+        struct aws_h2err reset_error;
+        bool reset_called;
+        bool manual_write_ended;
+
+        /* Simplified stream state. */
+        enum aws_h2_stream_api_state api_state;
+
+        /* any data streams sent manually via aws_http2_stream_write_data */
+        struct aws_linked_list pending_write_list; /* list of aws_h2_stream_data_write */
+    } synced_data;
+    /* Whether body data is supplied manually via aws_http2_stream_write_data */
+    bool manual_write;
+
+    /* Store the sent reset HTTP/2 error code, set to -1, if none has been sent so far */
+    int64_t sent_reset_error_code;
+
+    /* Store the received reset HTTP/2 error code, set to -1, if none has been received so far */
+    int64_t received_reset_error_code;
+};
+
+const char *aws_h2_stream_state_to_str(enum aws_h2_stream_state state);
+
+struct aws_h2_stream *aws_h2_stream_new_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+
+enum aws_h2_stream_state aws_h2_stream_get_state(const struct aws_h2_stream *stream);
+
+struct aws_h2err aws_h2_stream_window_size_change(struct aws_h2_stream *stream, int32_t size_changed, bool self);
+
+/* Connection is ready to send frames from stream now */
+int aws_h2_stream_on_activated(struct aws_h2_stream *stream, enum aws_h2_stream_body_state *body_state);
+
+/* Completes stream for one reason or another, clean up any pending writes/resources. */
+void aws_h2_stream_complete(struct aws_h2_stream *stream, int error_code);
+
+/* Connection is ready to send data from stream now.
+ * Stream may complete itself during this call.
+ * data_encode_status: see `aws_h2_data_encode_status`
+ */
+int aws_h2_stream_encode_data_frame(
+ struct aws_h2_stream *stream,
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_byte_buf *output,
+ int *data_encode_status);
+
+struct aws_h2err aws_h2_stream_on_decoder_headers_begin(struct aws_h2_stream *stream);
+
+struct aws_h2err aws_h2_stream_on_decoder_headers_i(
+ struct aws_h2_stream *stream,
+ const struct aws_http_header *header,
+ enum aws_http_header_name name_enum,
+ enum aws_http_header_block block_type);
+
+struct aws_h2err aws_h2_stream_on_decoder_headers_end(
+ struct aws_h2_stream *stream,
+ bool malformed,
+ enum aws_http_header_block block_type);
+
+struct aws_h2err aws_h2_stream_on_decoder_push_promise(struct aws_h2_stream *stream, uint32_t promised_stream_id);
+struct aws_h2err aws_h2_stream_on_decoder_data_begin(
+ struct aws_h2_stream *stream,
+ uint32_t payload_len,
+ uint32_t total_padding_bytes,
+ bool end_stream);
+struct aws_h2err aws_h2_stream_on_decoder_data_i(struct aws_h2_stream *stream, struct aws_byte_cursor data);
+struct aws_h2err aws_h2_stream_on_decoder_window_update(
+ struct aws_h2_stream *stream,
+ uint32_t window_size_increment,
+ bool *window_resume);
+struct aws_h2err aws_h2_stream_on_decoder_end_stream(struct aws_h2_stream *stream);
+struct aws_h2err aws_h2_stream_on_decoder_rst_stream(struct aws_h2_stream *stream, uint32_t h2_error_code);
+
+int aws_h2_stream_activate(struct aws_http_stream *stream);
+
+#endif /* AWS_HTTP_H2_STREAM_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack.h
new file mode 100644
index 00000000000..d0507c2aff7
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack.h
@@ -0,0 +1,297 @@
+#ifndef AWS_HTTP_HPACK_H
+#define AWS_HTTP_HPACK_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/request_response.h>
+
+#include <aws/common/hash_table.h>
+#include <aws/compression/huffman.h>
+
+/**
+ * Result of aws_hpack_decode() call.
+ * If a complete entry has not been decoded yet, type is ONGOING.
+ * Otherwise, type informs which data to look at.
+ */
+struct aws_hpack_decode_result {
+ enum aws_hpack_decode_type {
+ AWS_HPACK_DECODE_T_ONGOING,
+ AWS_HPACK_DECODE_T_HEADER_FIELD,
+ AWS_HPACK_DECODE_T_DYNAMIC_TABLE_RESIZE,
+ } type;
+
+ union {
+ /* If type is AWS_HPACK_DECODE_T_HEADER_FIELD */
+ struct aws_http_header header_field;
+
+ /* If type is AWS_HPACK_DECODE_T_DYNAMIC_TABLE_RESIZE */
+ size_t dynamic_table_resize;
+ } data;
+};
+
+/**
+ * Controls whether non-indexed strings will use Huffman encoding.
+ * In SMALLEST mode, strings will only be sent with Huffman encoding if it makes them smaller.
+ *
+ * Note: This does not control compression via "indexing",
+ * for that, see `aws_http_header_compression`.
+ * This only controls how string values are encoded when they're not already in a table.
+ */
+enum aws_hpack_huffman_mode {
+ AWS_HPACK_HUFFMAN_SMALLEST,
+ AWS_HPACK_HUFFMAN_NEVER,
+ AWS_HPACK_HUFFMAN_ALWAYS,
+};
+
+/**
+ * Maintains the dynamic table.
+ * Insertion is backwards, indexing is forwards
+ */
+struct aws_hpack_context {
+ struct aws_allocator *allocator;
+
+ enum aws_http_log_subject log_subject;
+ const void *log_id;
+
+ struct {
+ /* Array of headers, pointers to memory we alloced, which needs to be cleaned up whenever we move an entry out
+ */
+ struct aws_http_header *buffer;
+ size_t buffer_capacity; /* Number of http_headers that can fit in buffer */
+
+ size_t num_elements;
+ size_t index_0;
+
+ /* Size in bytes, according to [4.1] */
+ size_t size;
+ size_t max_size;
+
+ /* aws_http_header * -> size_t */
+ struct aws_hash_table reverse_lookup;
+ /* aws_byte_cursor * -> size_t */
+ struct aws_hash_table reverse_lookup_name_only;
+ } dynamic_table;
+};
+
+/**
+ * Encodes outgoing headers.
+ */
+struct aws_hpack_encoder {
+ const void *log_id;
+
+ struct aws_huffman_encoder huffman_encoder;
+ enum aws_hpack_huffman_mode huffman_mode;
+
+ struct aws_hpack_context context;
+
+ struct {
+ size_t latest_value;
+ size_t smallest_value;
+ bool pending;
+ } dynamic_table_size_update;
+};
+
+/**
+ * Decodes incoming headers
+ */
+struct aws_hpack_decoder {
+ const void *log_id;
+
+ struct aws_huffman_decoder huffman_decoder;
+
+ struct aws_hpack_context context;
+
+ /* TODO: check the new (RFC 9113 - 4.3.1) to make sure we did it right */
+ /* SETTINGS_HEADER_TABLE_SIZE from http2 */
+ size_t dynamic_table_protocol_max_size_setting;
+
+ /* PRO TIP: Don't union progress_integer and progress_string together, since string_decode calls integer_decode */
+ struct hpack_progress_integer {
+ enum {
+ HPACK_INTEGER_STATE_INIT,
+ HPACK_INTEGER_STATE_VALUE,
+ } state;
+ uint8_t bit_count;
+ } progress_integer;
+
+ struct hpack_progress_string {
+ enum {
+ HPACK_STRING_STATE_INIT,
+ HPACK_STRING_STATE_LENGTH,
+ HPACK_STRING_STATE_VALUE,
+ } state;
+ bool use_huffman;
+ uint64_t length;
+ } progress_string;
+
+ struct hpack_progress_entry {
+ enum {
+ HPACK_ENTRY_STATE_INIT,
+ /* Indexed header field: just 1 state. read index, find name and value at index */
+ HPACK_ENTRY_STATE_INDEXED,
+ /* Literal header field: name may be indexed OR literal, value is always literal */
+ HPACK_ENTRY_STATE_LITERAL_BEGIN,
+ HPACK_ENTRY_STATE_LITERAL_NAME_STRING,
+ HPACK_ENTRY_STATE_LITERAL_VALUE_STRING,
+ /* Dynamic table resize: just 1 state. read new size */
+ HPACK_ENTRY_STATE_DYNAMIC_TABLE_RESIZE,
+ /* Done */
+ HPACK_ENTRY_STATE_COMPLETE,
+ } state;
+
+ union {
+ struct {
+ uint64_t index;
+ } indexed;
+
+ struct hpack_progress_literal {
+ uint8_t prefix_size;
+ enum aws_http_header_compression compression;
+ uint64_t name_index;
+ size_t name_length;
+ } literal;
+
+ struct {
+ uint64_t size;
+ } dynamic_table_resize;
+ } u;
+
+ enum aws_hpack_decode_type type;
+
+ /* Scratch holds header name and value while decoding */
+ struct aws_byte_buf scratch;
+ } progress_entry;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/* Library-level init and shutdown */
+void aws_hpack_static_table_init(struct aws_allocator *allocator);
+void aws_hpack_static_table_clean_up(void);
+
+AWS_HTTP_API
+void aws_hpack_context_init(
+ struct aws_hpack_context *aws_hpack_context,
+ struct aws_allocator *allocator,
+ enum aws_http_log_subject log_subject,
+ const void *log_id);
+
+AWS_HTTP_API
+void aws_hpack_context_clean_up(struct aws_hpack_context *context);
+
+/* Returns the hpack size of a header (name.len + value.len + 32) [4.1] */
+AWS_HTTP_API
+size_t aws_hpack_get_header_size(const struct aws_http_header *header);
+
+/* Returns the number of elements in dynamic table now */
+AWS_HTTP_API
+size_t aws_hpack_get_dynamic_table_num_elements(const struct aws_hpack_context *context);
+
+size_t aws_hpack_get_dynamic_table_max_size(const struct aws_hpack_context *context);
+
+AWS_HTTP_API
+const struct aws_http_header *aws_hpack_get_header(const struct aws_hpack_context *context, size_t index);
+
+/* A return value of 0 indicates that the header wasn't found */
+AWS_HTTP_API
+size_t aws_hpack_find_index(
+ const struct aws_hpack_context *context,
+ const struct aws_http_header *header,
+ bool search_value,
+ bool *found_value);
+
+AWS_HTTP_API
+int aws_hpack_insert_header(struct aws_hpack_context *context, const struct aws_http_header *header);
+
+/**
+ * Set the max size of the dynamic table (in octets). The size of each header is name.len + value.len + 32 [4.1].
+ */
+AWS_HTTP_API
+int aws_hpack_resize_dynamic_table(struct aws_hpack_context *context, size_t new_max_size);
+
+AWS_HTTP_API
+void aws_hpack_encoder_init(struct aws_hpack_encoder *encoder, struct aws_allocator *allocator, const void *log_id);
+
+AWS_HTTP_API
+void aws_hpack_encoder_clean_up(struct aws_hpack_encoder *encoder);
+
+/* Call this after receiving SETTINGS_HEADER_TABLE_SIZE from peer and sending the ACK.
+ * The hpack-encoder remembers all size updates, and makes sure to encode the proper
+ * number of Dynamic Table Size Updates the next time a header block is sent. */
+AWS_HTTP_API
+void aws_hpack_encoder_update_max_table_size(struct aws_hpack_encoder *encoder, uint32_t new_max_size);
+
+AWS_HTTP_API
+void aws_hpack_encoder_set_huffman_mode(struct aws_hpack_encoder *encoder, enum aws_hpack_huffman_mode mode);
+
+/**
+ * Encode header-block into the output.
+ * This function will mutate hpack, so an error means hpack can no longer be used.
+ * Note that output will be dynamically resized if it's too short.
+ */
+AWS_HTTP_API
+int aws_hpack_encode_header_block(
+ struct aws_hpack_encoder *encoder,
+ const struct aws_http_headers *headers,
+ struct aws_byte_buf *output);
+
+AWS_HTTP_API
+void aws_hpack_decoder_init(struct aws_hpack_decoder *decoder, struct aws_allocator *allocator, const void *log_id);
+
+AWS_HTTP_API
+void aws_hpack_decoder_clean_up(struct aws_hpack_decoder *decoder);
+
+/* Call this after sending SETTINGS_HEADER_TABLE_SIZE and receiving ACK from the peer.
+ * The hpack-decoder remembers all size updates, and makes sure that the peer
+ * sends the appropriate Dynamic Table Size Updates in the next header block we receive. */
+AWS_HTTP_API
+void aws_hpack_decoder_update_max_table_size(struct aws_hpack_decoder *decoder, uint32_t new_max_size);
+
+/**
+ * Decode the next entry in the header-block-fragment.
+ * If result->type is ONGOING, then call decode() again with more data to resume decoding.
+ * Otherwise, type is either a HEADER_FIELD or a DYNAMIC_TABLE_RESIZE.
+ *
+ * If an error occurs, the decoder is broken and decode() must not be called again.
+ */
+AWS_HTTP_API
+int aws_hpack_decode(
+ struct aws_hpack_decoder *decoder,
+ struct aws_byte_cursor *to_decode,
+ struct aws_hpack_decode_result *result);
+
+/*******************************************************************************
+ * Private functions for encoder/decoder, but public for testing purposes
+ ******************************************************************************/
+
+/* Output will be dynamically resized if it's too short */
+AWS_HTTP_API
+int aws_hpack_encode_integer(uint64_t integer, uint8_t starting_bits, uint8_t prefix_size, struct aws_byte_buf *output);
+
+/* Output will be dynamically resized if it's too short */
+AWS_HTTP_API
+int aws_hpack_encode_string(
+ struct aws_hpack_encoder *encoder,
+ struct aws_byte_cursor to_encode,
+ struct aws_byte_buf *output);
+
+AWS_HTTP_API
+int aws_hpack_decode_integer(
+ struct aws_hpack_decoder *decoder,
+ struct aws_byte_cursor *to_decode,
+ uint8_t prefix_size,
+ uint64_t *integer,
+ bool *complete);
+
+AWS_HTTP_API
+int aws_hpack_decode_string(
+ struct aws_hpack_decoder *decoder,
+ struct aws_byte_cursor *to_decode,
+ struct aws_byte_buf *output,
+ bool *complete);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_HPACK_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack_header_static_table.def b/contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack_header_static_table.def
new file mode 100644
index 00000000000..f9abd74f3d4
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack_header_static_table.def
@@ -0,0 +1,74 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef HEADER
+#error "Macro HEADER(index, name) must be defined before including this header file!"
+#endif
+
+#ifndef HEADER_WITH_VALUE
+#error "Macro HEADER_WITH_VALUE(index, name, value) must be defined before including this header file!"
+#endif
+
+HEADER(1, ":authority")
+HEADER_WITH_VALUE(2, ":method", "GET")
+HEADER_WITH_VALUE(3, ":method", "POST")
+HEADER_WITH_VALUE(4, ":path", "/")
+HEADER_WITH_VALUE(5, ":path", "/index.html")
+HEADER_WITH_VALUE(6, ":scheme", "http")
+HEADER_WITH_VALUE(7, ":scheme", "https")
+HEADER_WITH_VALUE(8, ":status", "200")
+HEADER_WITH_VALUE(9, ":status", "204")
+HEADER_WITH_VALUE(10, ":status", "206")
+HEADER_WITH_VALUE(11, ":status", "304")
+HEADER_WITH_VALUE(12, ":status", "400")
+HEADER_WITH_VALUE(13, ":status", "404")
+HEADER_WITH_VALUE(14, ":status", "500")
+HEADER(15, "accept-charset")
+HEADER_WITH_VALUE(16, "accept-encoding", "gzip,deflate")
+HEADER(17, "accept-language")
+HEADER(18, "accept-ranges")
+HEADER(19, "accept")
+HEADER(20, "access-control-allow-origin")
+HEADER(21, "age")
+HEADER(22, "allow")
+HEADER(23, "authorization")
+HEADER(24, "cache-control")
+HEADER(25, "content-disposition")
+HEADER(26, "content-encoding")
+HEADER(27, "content-language")
+HEADER(28, "content-length")
+HEADER(29, "content-location")
+HEADER(30, "content-range")
+HEADER(31, "content-type")
+HEADER(32, "cookie")
+HEADER(33, "date")
+HEADER(34, "etag")
+HEADER(35, "expect")
+HEADER(36, "expires")
+HEADER(37, "from")
+HEADER(38, "host")
+HEADER(39, "if-match")
+HEADER(40, "if-modified-since")
+HEADER(41, "if-none-match")
+HEADER(42, "if-range")
+HEADER(43, "if-unmodified-since")
+HEADER(44, "last-modified")
+HEADER(45, "link")
+HEADER(46, "location")
+HEADER(47, "max-forwards")
+HEADER(48, "proxy-authenticate")
+HEADER(49, "proxy-authorization")
+HEADER(50, "range")
+HEADER(51, "referer")
+HEADER(52, "refresh")
+HEADER(53, "retry-after")
+HEADER(54, "server")
+HEADER(55, "set-cookie")
+HEADER(56, "strict-transport-security")
+HEADER(57, "transfer-encoding")
+HEADER(58, "user-agent")
+HEADER(59, "vary")
+HEADER(60, "via")
+HEADER(61, "www-authenticate")
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/http2_stream_manager_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/http2_stream_manager_impl.h
new file mode 100644
index 00000000000..d9252047e54
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/http2_stream_manager_impl.h
@@ -0,0 +1,199 @@
+#ifndef AWS_HTTP2_STREAM_MANAGER_IMPL_H
+#define AWS_HTTP2_STREAM_MANAGER_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/mutex.h>
+#include <aws/common/ref_count.h>
+#include <aws/http/http2_stream_manager.h>
+#include <aws/http/private/random_access_set.h>
+
+enum aws_h2_sm_state_type {
+ AWS_H2SMST_READY,
+ AWS_H2SMST_DESTROYING, /* On zero external ref count, can destroy */
+};
+
+enum aws_h2_sm_connection_state_type {
+ AWS_H2SMCST_IDEAL,
+ AWS_H2SMCST_NEARLY_FULL,
+ AWS_H2SMCST_FULL,
+};
+
+/* Lives while streams are opening; if there are no outstanding pending acquisitions and no opening streams on the
+ * connection, this structure should die */
+struct aws_h2_sm_connection {
+ struct aws_allocator *allocator;
+ struct aws_http2_stream_manager *stream_manager;
+ struct aws_http_connection *connection;
+ uint32_t num_streams_assigned; /* From a stream assigned to the connection until the stream completed
+ or failed to be created from the connection. */
+ uint32_t max_concurrent_streams; /* lower bound between user configured and the other side */
+
+ /* task to send ping periodically from connection thread. */
+ struct aws_ref_count ref_count;
+ struct aws_channel_task ping_task;
+ struct aws_channel_task ping_timeout_task;
+ struct {
+ bool ping_received;
+ bool stopped_new_requests;
+ uint64_t next_ping_task_time;
+ } thread_data;
+
+ enum aws_h2_sm_connection_state_type state;
+};
+
+/* Live from the user request to acquire a stream to the stream completed. */
+struct aws_h2_sm_pending_stream_acquisition {
+ struct aws_allocator *allocator;
+ struct aws_linked_list_node node;
+ struct aws_http_make_request_options options;
+    struct aws_h2_sm_connection *sm_connection; /* The connection to make the request on. Kept
+                                       NULL until we find an available one and move it to the pending_make_requests
+                                       list. */
+ struct aws_http_message *request;
+ struct aws_channel_task make_request_task;
+ aws_http2_stream_manager_on_stream_acquired_fn *callback;
+ void *user_data;
+};
+
+/* connections_acquiring_count, open_stream_count, pending_make_requests_count AND pending_stream_acquisition_count */
+enum aws_sm_count_type {
+ AWS_SMCT_CONNECTIONS_ACQUIRING,
+ AWS_SMCT_OPEN_STREAM,
+ AWS_SMCT_PENDING_MAKE_REQUESTS,
+ AWS_SMCT_PENDING_ACQUISITION,
+ AWS_SMCT_COUNT,
+};
+
+struct aws_http2_stream_manager {
+ struct aws_allocator *allocator;
+ void *shutdown_complete_user_data;
+ aws_http2_stream_manager_shutdown_complete_fn *shutdown_complete_callback;
+ /**
+ * Underlying connection manager. Always has the same life time with the stream manager who owns it.
+ */
+ struct aws_http_connection_manager *connection_manager;
+ /**
+ * Refcount managed by user. Once this drops to zero, the manager state transitions to shutting down
+ */
+ struct aws_ref_count external_ref_count;
+ /**
+ * Internal refcount that keeps connection manager alive.
+ *
+ * It's a sum of connections_acquiring_count, open_stream_count, pending_make_requests_count and
+ * pending_stream_acquisition_count, besides the number of `struct aws_http2_stream_management_transaction` alive.
+ * And one for external usage.
+ *
+     * Once this refcount drops to zero, the stream manager should either clean up all the memory itself, or wait
+     * for the last task to clean up the memory, and do nothing else.
+ */
+ struct aws_ref_count internal_ref_count;
+ struct aws_client_bootstrap *bootstrap;
+
+ /* Configurations */
+ size_t max_connections;
+ /* Connection will be closed if 5xx response received from server. */
+ bool close_connection_on_server_error;
+
+ uint64_t connection_ping_period_ns;
+ uint64_t connection_ping_timeout_ns;
+
+ /**
+ * Default is no limit. 0 will be considered as using the default value.
+ * The ideal number of concurrent streams for a connection. Stream manager will try to create a new connection if
+     * one connection reaches this number. But if the max number of connections is reached, the manager will reuse
+     * connections to create the acquired streams as much as possible. */
+ size_t ideal_concurrent_streams_per_connection;
+ /**
+ * Default is no limit. 0 will be considered as using the default value.
+     * The real number of concurrent streams per connection will be controlled by the minimal value of the setting from
+ * other end and the value here.
+ */
+ size_t max_concurrent_streams_per_connection;
+
+ /**
+     * Task to invoke pending acquisition callbacks asynchronously if the stream manager is shutting down.
+ */
+ struct aws_event_loop *finish_pending_stream_acquisitions_task_event_loop;
+
+ /* Any thread may touch this data, but the lock must be held (unless it's an atomic) */
+ struct {
+ struct aws_mutex lock;
+ /*
+ * A manager can be in one of two states, READY or SHUTTING_DOWN. The state transition
+ * takes place when ref_count drops to zero.
+ */
+ enum aws_h2_sm_state_type state;
+
+ /**
+         * A set of all connections that meet all requirements for use. Note: there will be connections not in this
+         * set, but held by the stream manager, which can be tracked by the streams created on them. Set of `struct
+         * aws_h2_sm_connection *`
+ */
+ struct aws_random_access_set ideal_available_set;
+ /**
+         * A set of all available connections that exceed the soft limits set by users. Note: there will be connections
+         * not in this set, but held by the stream manager, which can be tracked by the streams created. Set of `struct
+         * aws_h2_sm_connection *`
+ */
+ struct aws_random_access_set nonideal_available_set;
+        /* We don't maintain a set for connections that are full or "dead" (cannot make any new streams). We track
+         * them via the streams opened on the connection */
+
+ /**
+         * The set of all incomplete stream acquisition requests (haven't decided which connection to make the
+         * request on), list of `struct aws_h2_sm_pending_stream_acquisition*`
+ */
+ struct aws_linked_list pending_stream_acquisitions;
+
+ /**
+ * The number of connections acquired from connection manager and not released yet.
+ */
+ size_t holding_connections_count;
+
+ /**
+ * Counts that contributes to the internal refcount.
+ * When the value changes, s_sm_count_increase/decrease_synced needed.
+ *
+ * AWS_SMCT_CONNECTIONS_ACQUIRING: The number of new connections we acquiring from the connection manager.
+ * AWS_SMCT_OPEN_STREAM: The number of streams that opened and not completed yet.
+ * AWS_SMCT_PENDING_MAKE_REQUESTS: The number of streams that scheduled to be made from a connection but haven't
+ * been executed yet.
+         * AWS_SMCT_PENDING_ACQUISITION: The number of all incomplete stream acquisition requests (haven't decided
+         * which connection to make the request on), kept so we don't have to compute the linked list's size each time.
+ */
+ size_t internal_refcount_stats[AWS_SMCT_COUNT];
+
+ bool finish_pending_stream_acquisitions_task_scheduled;
+ } synced_data;
+};
+
+/**
+ * Encompasses all of the external operations that need to be done for various
+ * events:
+ * - User level:
+ * stream manager release
+ * stream acquire
+ * - Internal event loop (another thread):
+ * connection_acquired
+ * stream_completed
+ * - Internal (can happen from any thread):
+ * connection acquire
+ * connection release
+ *
+ * The transaction is built under the manager's lock (and the internal state is updated optimistically),
+ * but then executed outside of it.
+ */
+struct aws_http2_stream_management_transaction {
+ struct aws_http2_stream_manager *stream_manager;
+ struct aws_allocator *allocator;
+ size_t new_connections;
+ struct aws_h2_sm_connection *sm_connection_to_release;
+ struct aws_linked_list
+ pending_make_requests; /* List of aws_h2_sm_pending_stream_acquisition with chosen connection */
+};
+
+#endif /* AWS_HTTP2_STREAM_MANAGER_IMPL_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/http_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/http_impl.h
new file mode 100644
index 00000000000..8940d545539
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/http_impl.h
@@ -0,0 +1,100 @@
+#ifndef AWS_HTTP_IMPL_H
+#define AWS_HTTP_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+/**
+ * Methods that affect internal processing.
+ * This is NOT a definitive list of methods.
+ */
+enum aws_http_method {
+ AWS_HTTP_METHOD_UNKNOWN, /* Unrecognized value. */
+ AWS_HTTP_METHOD_GET,
+ AWS_HTTP_METHOD_HEAD,
+ AWS_HTTP_METHOD_CONNECT,
+ AWS_HTTP_METHOD_COUNT, /* Number of enums */
+};
+
+/**
+ * Headers that affect internal processing.
+ * This is NOT a definitive list of headers.
+ */
+enum aws_http_header_name {
+ AWS_HTTP_HEADER_UNKNOWN, /* Unrecognized value */
+
+ /* Request pseudo-headers */
+ AWS_HTTP_HEADER_METHOD,
+ AWS_HTTP_HEADER_SCHEME,
+ AWS_HTTP_HEADER_AUTHORITY,
+ AWS_HTTP_HEADER_PATH,
+
+ /* Response pseudo-headers */
+ AWS_HTTP_HEADER_STATUS,
+
+ /* Regular headers */
+ AWS_HTTP_HEADER_CONNECTION,
+ AWS_HTTP_HEADER_CONTENT_LENGTH,
+ AWS_HTTP_HEADER_EXPECT,
+ AWS_HTTP_HEADER_TRANSFER_ENCODING,
+ AWS_HTTP_HEADER_COOKIE,
+ AWS_HTTP_HEADER_SET_COOKIE,
+ AWS_HTTP_HEADER_HOST,
+ AWS_HTTP_HEADER_CACHE_CONTROL,
+ AWS_HTTP_HEADER_MAX_FORWARDS,
+ AWS_HTTP_HEADER_PRAGMA,
+ AWS_HTTP_HEADER_RANGE,
+ AWS_HTTP_HEADER_TE,
+ AWS_HTTP_HEADER_CONTENT_ENCODING,
+ AWS_HTTP_HEADER_CONTENT_TYPE,
+ AWS_HTTP_HEADER_CONTENT_RANGE,
+ AWS_HTTP_HEADER_TRAILER,
+ AWS_HTTP_HEADER_WWW_AUTHENTICATE,
+ AWS_HTTP_HEADER_AUTHORIZATION,
+ AWS_HTTP_HEADER_PROXY_AUTHENTICATE,
+ AWS_HTTP_HEADER_PROXY_AUTHORIZATION,
+ AWS_HTTP_HEADER_AGE,
+ AWS_HTTP_HEADER_EXPIRES,
+ AWS_HTTP_HEADER_DATE,
+ AWS_HTTP_HEADER_LOCATION,
+ AWS_HTTP_HEADER_RETRY_AFTER,
+ AWS_HTTP_HEADER_VARY,
+ AWS_HTTP_HEADER_WARNING,
+ AWS_HTTP_HEADER_UPGRADE,
+ AWS_HTTP_HEADER_KEEP_ALIVE,
+ AWS_HTTP_HEADER_PROXY_CONNECTION,
+
+ AWS_HTTP_HEADER_COUNT, /* Number of enums */
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API void aws_http_fatal_assert_library_initialized(void);
+
+AWS_HTTP_API struct aws_byte_cursor aws_http_version_to_str(enum aws_http_version version);
+
+/**
+ * Returns appropriate enum, or AWS_HTTP_METHOD_UNKNOWN if no match found.
+ * Case-sensitive
+ */
+AWS_HTTP_API enum aws_http_method aws_http_str_to_method(struct aws_byte_cursor cursor);
+
+/**
+ * Returns appropriate enum, or AWS_HTTP_HEADER_UNKNOWN if no match found.
+ * Not case-sensitive
+ */
+AWS_HTTP_API enum aws_http_header_name aws_http_str_to_header_name(struct aws_byte_cursor cursor);
+
+/**
+ * Returns appropriate enum, or AWS_HTTP_HEADER_UNKNOWN if no match found.
+ * Case-sensitive (ex: "Connection" -> AWS_HTTP_HEADER_UNKNOWN because we looked for "connection").
+ */
+AWS_HTTP_API enum aws_http_header_name aws_http_lowercase_str_to_header_name(struct aws_byte_cursor cursor);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_IMPL_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/proxy_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/proxy_impl.h
new file mode 100644
index 00000000000..c47305b251e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/proxy_impl.h
@@ -0,0 +1,236 @@
+#ifndef AWS_HTTP_PROXY_IMPL_H
+#define AWS_HTTP_PROXY_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+#include <aws/common/hash_table.h>
+#include <aws/http/connection.h>
+#include <aws/http/proxy.h>
+#include <aws/http/status_code.h>
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/socket.h>
+
+struct aws_http_connection_manager_options;
+struct aws_http_message;
+struct aws_channel_slot;
+struct aws_string;
+struct aws_tls_connection_options;
+struct aws_http_proxy_negotiator;
+struct aws_http_proxy_strategy;
+struct aws_http_proxy_strategy_tunneling_sequence_options;
+struct aws_http_proxy_strategy_tunneling_kerberos_options;
+struct aws_http_proxy_strategy_tunneling_ntlm_options;
+
+/*
+ * (Successful) State transitions for proxy connections
+ *
+ * Http : None -> Socket Connect -> Success
+ * Https: None -> Socket Connect -> Http Connect -> Tls Negotiation -> Success
+ */
+enum aws_proxy_bootstrap_state {
+ AWS_PBS_NONE = 0,
+ AWS_PBS_SOCKET_CONNECT,
+ AWS_PBS_HTTP_CONNECT,
+ AWS_PBS_TLS_NEGOTIATION,
+ AWS_PBS_SUCCESS,
+ AWS_PBS_FAILURE,
+};
+
+/**
+ * A persistent copy of the aws_http_proxy_options struct. Clones everything appropriate.
+ */
+struct aws_http_proxy_config {
+
+ struct aws_allocator *allocator;
+
+ enum aws_http_proxy_connection_type connection_type;
+
+ struct aws_byte_buf host;
+
+ uint16_t port;
+
+ struct aws_tls_connection_options *tls_options;
+
+ struct aws_http_proxy_strategy *proxy_strategy;
+};
+
+/*
+ * When a proxy connection is made, we wrap the user-supplied user data with this
+ * proxy user data. Callbacks are passed properly to the user. By having this data
+ * available, the proxy request transform that was attached to the connection can extract
+ * the proxy settings it needs in order to properly transform the requests.
+ *
+ * Another possibility would be to fold this data into the connection itself.
+ */
+struct aws_http_proxy_user_data {
+ struct aws_allocator *allocator;
+
+ /*
+ * dynamic proxy connection resolution state
+ */
+ enum aws_proxy_bootstrap_state state;
+ int error_code;
+ enum aws_http_status_code connect_status_code;
+
+ /*
+ * The initial http connection object between the client and the proxy.
+ */
+ struct aws_http_connection *proxy_connection;
+
+ /*
+ * The http connection object that gets surfaced to callers if http is the final protocol of proxy
+ * negotiation.
+ *
+ * In the case of a forwarding proxy, proxy_connection and final_connection are the same.
+ */
+ struct aws_http_connection *final_connection;
+ struct aws_http_message *connect_request;
+ struct aws_http_stream *connect_stream;
+ struct aws_http_proxy_negotiator *proxy_negotiator;
+
+ /*
+ * Cached original connect options
+ */
+ struct aws_string *original_host;
+ uint16_t original_port;
+ void *original_user_data;
+ struct aws_tls_connection_options *original_tls_options;
+ struct aws_client_bootstrap *original_bootstrap;
+ struct aws_socket_options original_socket_options;
+ bool original_manual_window_management;
+ size_t original_initial_window_size;
+ bool prior_knowledge_http2;
+ struct aws_http1_connection_options original_http1_options;
+ struct aws_http2_connection_options
+ original_http2_options; /* the resource within options are allocated with userdata */
+ struct aws_hash_table alpn_string_map;
+ /*
+ * setup/shutdown callbacks. We enforce via fatal assert that either the http callbacks are supplied or
+ * the channel callbacks are supplied but never both.
+ *
+ * When using a proxy to ultimately establish an http connection, use the http callbacks.
+ * When using a proxy to establish any other protocol connection, use the raw channel callbacks.
+ *
+ * In the future, we might consider a further refactor which only use raw channel callbacks.
+ */
+ aws_http_on_client_connection_setup_fn *original_http_on_setup;
+ aws_http_on_client_connection_shutdown_fn *original_http_on_shutdown;
+ aws_client_bootstrap_on_channel_event_fn *original_channel_on_setup;
+ aws_client_bootstrap_on_channel_event_fn *original_channel_on_shutdown;
+
+ struct aws_http_proxy_config *proxy_config;
+
+ struct aws_event_loop *requested_event_loop;
+};
+
+struct aws_http_proxy_system_vtable {
+ int (*setup_client_tls)(struct aws_channel_slot *right_of_slot, struct aws_tls_connection_options *tls_options);
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API
+struct aws_http_proxy_user_data *aws_http_proxy_user_data_new(
+ struct aws_allocator *allocator,
+ const struct aws_http_client_connection_options *options,
+ aws_client_bootstrap_on_channel_event_fn *on_channel_setup,
+ aws_client_bootstrap_on_channel_event_fn *on_channel_shutdown);
+
+AWS_HTTP_API
+void aws_http_proxy_user_data_destroy(struct aws_http_proxy_user_data *user_data);
+
+AWS_HTTP_API
+int aws_http_client_connect_via_proxy(const struct aws_http_client_connection_options *options);
+
+AWS_HTTP_API
+int aws_http_rewrite_uri_for_proxy_request(
+ struct aws_http_message *request,
+ struct aws_http_proxy_user_data *proxy_user_data);
+
+AWS_HTTP_API
+void aws_http_proxy_system_set_vtable(struct aws_http_proxy_system_vtable *vtable);
+
+/**
+ * Checks if tunneling proxy negotiation should continue to try and connect
+ * @param proxy_negotiator negotiator to query
+ * @return true if another connect request should be attempted, false otherwise
+ */
+AWS_HTTP_API
+enum aws_http_proxy_negotiation_retry_directive aws_http_proxy_negotiator_get_retry_directive(
+ struct aws_http_proxy_negotiator *proxy_negotiator);
+
+/**
+ * Constructor for a tunnel-only proxy strategy that applies no changes to outbound CONNECT requests. Intended to be
+ * the first link in an adaptive sequence for a tunneling proxy: first try a basic CONNECT, then based on the response,
+ * later links are allowed to make attempts.
+ *
+ * @param allocator memory allocator to use
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_one_time_identity(
+ struct aws_allocator *allocator);
+
+/**
+ * Constructor for a forwarding-only proxy strategy that does nothing. Exists so that all proxy logic uses a
+ * strategy.
+ *
+ * @param allocator memory allocator to use
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_forwarding_identity(struct aws_allocator *allocator);
+
+/**
+ * Constructor for a tunneling proxy strategy that contains a set of sub-strategies which are tried
+ * sequentially in order. Each strategy has the choice to either proceed on a fresh connection or
+ * reuse the current one.
+ *
+ * @param allocator memory allocator to use
+ * @param config sequence configuration options
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_sequence(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_tunneling_sequence_options *config);
+
+/**
+ * A constructor for a proxy strategy that performs kerberos authentication by adding the appropriate
+ * header and header value to CONNECT requests.
+ *
+ * Currently only supports synchronous fetch of kerberos token values.
+ *
+ * @param allocator memory allocator to use
+ * @param config kerberos authentication configuration info
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_kerberos(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_tunneling_kerberos_options *config);
+
+/**
+ * Constructor for an NTLM proxy strategy. Because ntlm is a challenge-response authentication protocol, this
+ * strategy will only succeed in a chain in a non-leading position. The strategy extracts the challenge from the
+ * proxy's response to a previous CONNECT request in the chain.
+ *
+ * Currently only supports synchronous fetch of token values.
+ *
+ * @param allocator memory allocator to use
+ * @param config configuration options for the strategy
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_ntlm(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_tunneling_ntlm_options *config);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_PROXY_IMPL_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/random_access_set.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/random_access_set.h
new file mode 100644
index 00000000000..d0880a7194f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/random_access_set.h
@@ -0,0 +1,86 @@
+#ifndef AWS_HTTP_RANDOM_ACCESS_SET_H
+#define AWS_HTTP_RANDOM_ACCESS_SET_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/array_list.h>
+#include <aws/common/hash_table.h>
+#include <aws/http/http.h>
+
+/* TODO: someday, if you want to use it from other repo, move it to aws-c-common. */
+
+struct aws_random_access_set_impl;
+
+struct aws_random_access_set {
+ struct aws_random_access_set_impl *impl;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initialize the set, which supports constant-time insert, remove, and retrieval of a random
+ * element from the data structure.
+ *
+ * The underlying hash map will use hash_fn to compute the hash of each element. equals_fn to compute equality of two
+ * keys.
+ *
+ * @param set Pointer of structure to initialize with
+ * @param allocator Allocator
+ * @param hash_fn Compute the hash of each element
+ * @param equals_fn Compute equality of two elements
+ * @param destroy_element_fn Optional. Called when the element is removed
+ * @param initial_item_allocation The initial number of items to allocate.
+ * @return AWS_OP_ERR if initialization fails, AWS_OP_SUCCESS on success.
+ */
+AWS_HTTP_API
+int aws_random_access_set_init(
+ struct aws_random_access_set *set,
+ struct aws_allocator *allocator,
+ aws_hash_fn *hash_fn,
+ aws_hash_callback_eq_fn *equals_fn,
+ aws_hash_callback_destroy_fn *destroy_element_fn,
+ size_t initial_item_allocation);
+
+AWS_HTTP_API
+void aws_random_access_set_clean_up(struct aws_random_access_set *set);
+
+/**
+ * Insert the element at the end of the array list, and add a mapping from the element to its index to the hash table.
+ */
+AWS_HTTP_API
+int aws_random_access_set_add(struct aws_random_access_set *set, const void *element, bool *added);
+
+/**
+ * Find and remove the element from the table. If the element does not exist, or the table is empty, nothing will
+ * happen. Swap the element with the end of the array list if needed, then remove the end of the array list.
+ */
+AWS_HTTP_API
+int aws_random_access_set_remove(struct aws_random_access_set *set, const void *element);
+
+/**
+ * Get the pointer to a random element from the data structure. Fails when the data structure is empty.
+ */
+AWS_HTTP_API
+int aws_random_access_set_random_get_ptr(const struct aws_random_access_set *set, void **out);
+
+AWS_HTTP_API
+size_t aws_random_access_set_get_size(const struct aws_random_access_set *set);
+
+/**
+ * Check whether the element exists in the data structure.
+ */
+AWS_HTTP_API
+int aws_random_access_set_exist(const struct aws_random_access_set *set, const void *element, bool *exist);
+
+/**
+ * Get the pointer to the element currently stored at the given index. The index of an element may change when
+ * operations like remove and add happen. Helpful for debugging and iterating through the whole set.
+ */
+AWS_HTTP_API
+int aws_random_access_set_random_get_ptr_index(const struct aws_random_access_set *set, void **out, size_t index);
+
+AWS_EXTERN_C_END
+#endif /* AWS_HTTP_RANDOM_ACCESS_SET_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/request_response_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/request_response_impl.h
new file mode 100644
index 00000000000..9cd06e01c24
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/request_response_impl.h
@@ -0,0 +1,69 @@
+#ifndef AWS_HTTP_REQUEST_RESPONSE_IMPL_H
+#define AWS_HTTP_REQUEST_RESPONSE_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/request_response.h>
+
+#include <aws/http/private/http_impl.h>
+
+#include <aws/common/atomics.h>
+
+struct aws_http_stream_vtable {
+ void (*destroy)(struct aws_http_stream *stream);
+ void (*update_window)(struct aws_http_stream *stream, size_t increment_size);
+ int (*activate)(struct aws_http_stream *stream);
+
+ int (*http1_write_chunk)(struct aws_http_stream *http1_stream, const struct aws_http1_chunk_options *options);
+ int (*http1_add_trailer)(struct aws_http_stream *http1_stream, const struct aws_http_headers *trailing_headers);
+
+ int (*http2_reset_stream)(struct aws_http_stream *http2_stream, uint32_t http2_error);
+ int (*http2_get_received_error_code)(struct aws_http_stream *http2_stream, uint32_t *http2_error);
+ int (*http2_get_sent_error_code)(struct aws_http_stream *http2_stream, uint32_t *http2_error);
+ int (*http2_write_data)(
+ struct aws_http_stream *http2_stream,
+ const struct aws_http2_stream_write_data_options *options);
+};
+
+/**
+ * Base class for streams.
+ * There are specific implementations for each HTTP version.
+ */
+struct aws_http_stream {
+ const struct aws_http_stream_vtable *vtable;
+ struct aws_allocator *alloc;
+ struct aws_http_connection *owning_connection;
+
+ uint32_t id;
+
+ void *user_data;
+ aws_http_on_incoming_headers_fn *on_incoming_headers;
+ aws_http_on_incoming_header_block_done_fn *on_incoming_header_block_done;
+ aws_http_on_incoming_body_fn *on_incoming_body;
+ aws_http_on_stream_complete_fn *on_complete;
+ aws_http_on_stream_destroy_fn *on_destroy;
+
+ struct aws_atomic_var refcount;
+ enum aws_http_method request_method;
+
+ union {
+ struct aws_http_stream_client_data {
+ int response_status;
+ } client;
+ struct aws_http_stream_server_data {
+ struct aws_byte_cursor request_method_str;
+ struct aws_byte_cursor request_path;
+ aws_http_on_incoming_request_done_fn *on_request_done;
+ } server;
+ } client_or_server_data;
+
+ /* On client connections, `client_data` points to client_or_server_data.client and `server_data` is null.
+ * Opposite is true on server connections */
+ struct aws_http_stream_client_data *client_data;
+ struct aws_http_stream_server_data *server_data;
+};
+
+#endif /* AWS_HTTP_REQUEST_RESPONSE_IMPL_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/strutil.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/strutil.h
new file mode 100644
index 00000000000..f670599344f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/strutil.h
@@ -0,0 +1,84 @@
+#ifndef AWS_HTTP_STRUTIL_H
+#define AWS_HTTP_STRUTIL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/http.h>
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Return a cursor with all leading and trailing SPACE and TAB characters removed.
+ * RFC7230 section 3.2.3 Whitespace
+ * Examples:
+ * " \t a \t " -> "a"
+ * "a \t a" -> "a \t a"
+ */
+AWS_HTTP_API
+struct aws_byte_cursor aws_strutil_trim_http_whitespace(struct aws_byte_cursor cursor);
+
+/**
+ * Return whether this is a valid token, as defined by RFC7230 section 3.2.6:
+ * token = 1*tchar
+ * tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
+ * / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
+ * / DIGIT / ALPHA
+ */
+AWS_HTTP_API
+bool aws_strutil_is_http_token(struct aws_byte_cursor token);
+
+/**
+ * Same as aws_strutil_is_http_token(), but uppercase letters are forbidden.
+ */
+AWS_HTTP_API
+bool aws_strutil_is_lowercase_http_token(struct aws_byte_cursor token);
+
+/**
+ * Return whether this ASCII/UTF-8 sequence is a valid HTTP header field-value.
+ *
+ * As defined in RFC7230 section 3.2 (except we are ALWAYS forbidding obs-fold):
+ *
+ * field-value = *( field-content / obs-fold )
+ * field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+ * field-vchar = VCHAR / obs-text
+ * VCHAR = %x21-7E ; visible (printing) characters
+ * obs-text = %x80-FF
+ *
+ * Note that we ALWAYS forbid obs-fold. Section 3.2.4 explains how
+ * obs-fold is deprecated "except within the message/http media type".
+ */
+AWS_HTTP_API
+bool aws_strutil_is_http_field_value(struct aws_byte_cursor cursor);
+
+/**
+ * Return whether this ASCII/UTF-8 sequence is a valid HTTP response status reason-phrase.
+ *
+ * As defined in RFC7230 section 3.1.2:
+ *
+ * reason-phrase = *( HTAB / SP / VCHAR / obs-text )
+ * VCHAR = %x21-7E ; visible (printing) characters
+ * obs-text = %x80-FF
+ */
+AWS_HTTP_API
+bool aws_strutil_is_http_reason_phrase(struct aws_byte_cursor cursor);
+
+/**
+ * Return whether this ASCII/UTF-8 sequence is a valid HTTP request-target.
+ *
+ * TODO: Actually check the complete grammar as defined in RFC7230 5.3 and
+ * RFC3986. Currently this just checks whether the sequence is blatantly illegal
+ * (ex: contains CR or LF)
+ */
+AWS_HTTP_API
+bool aws_strutil_is_http_request_target(struct aws_byte_cursor cursor);
+
+/**
+ * Return whether this ASCII/UTF-8 sequence starts with ":", as required for pseudo-headers.
+ */
+AWS_HTTP_API
+bool aws_strutil_is_http_pseudo_header_name(struct aws_byte_cursor cursor);
+
+AWS_EXTERN_C_END
+#endif /* AWS_HTTP_STRUTIL_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_decoder.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_decoder.h
new file mode 100644
index 00000000000..d9e84c59978
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_decoder.h
@@ -0,0 +1,79 @@
+#ifndef AWS_HTTP_WEBSOCKET_DECODER_H
+#define AWS_HTTP_WEBSOCKET_DECODER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/websocket_impl.h>
+
+/* Called when the non-payload portion of a frame has been decoded. */
+typedef int(aws_websocket_decoder_frame_fn)(const struct aws_websocket_frame *frame, void *user_data);
+
+/* Called repeatedly as the payload is decoded. If a mask was used, the data has been unmasked. */
+typedef int(aws_websocket_decoder_payload_fn)(struct aws_byte_cursor data, void *user_data);
+
+/**
+ * Each state consumes data and/or moves decoder to a subsequent state.
+ */
+enum aws_websocket_decoder_state {
+ AWS_WEBSOCKET_DECODER_STATE_INIT,
+ AWS_WEBSOCKET_DECODER_STATE_OPCODE_BYTE,
+ AWS_WEBSOCKET_DECODER_STATE_LENGTH_BYTE,
+ AWS_WEBSOCKET_DECODER_STATE_EXTENDED_LENGTH,
+ AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY_CHECK,
+ AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY,
+ AWS_WEBSOCKET_DECODER_STATE_PAYLOAD_CHECK,
+ AWS_WEBSOCKET_DECODER_STATE_PAYLOAD,
+ AWS_WEBSOCKET_DECODER_STATE_FRAME_END,
+ AWS_WEBSOCKET_DECODER_STATE_DONE,
+};
+
+struct aws_websocket_decoder {
+ enum aws_websocket_decoder_state state;
+ uint64_t state_bytes_processed; /* For multi-byte states, the number of bytes processed so far */
+ uint8_t state_cache[8]; /* For multi-byte states to cache data that might be split across packets */
+
+ struct aws_websocket_frame current_frame; /* Data about current frame being decoded */
+
+ bool expecting_continuation_data_frame; /* True when the next data frame must be CONTINUATION frame */
+
+ /* True while processing a TEXT "message" (from the start of a TEXT frame,
+ * until the end of the TEXT or CONTINUATION frame with the FIN bit set). */
+ bool processing_text_message;
+ struct aws_utf8_decoder *text_message_validator;
+
+ void *user_data;
+ aws_websocket_decoder_frame_fn *on_frame;
+ aws_websocket_decoder_payload_fn *on_payload;
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API
+void aws_websocket_decoder_init(
+ struct aws_websocket_decoder *decoder,
+ struct aws_allocator *alloc,
+ aws_websocket_decoder_frame_fn *on_frame,
+ aws_websocket_decoder_payload_fn *on_payload,
+ void *user_data);
+
+AWS_HTTP_API
+void aws_websocket_decoder_clean_up(struct aws_websocket_decoder *decoder);
+
+/**
+ * Returns when all data is processed, or a frame and its payload have completed.
+ * `data` will be advanced to reflect the amount of data processed by this call.
+ * `frame_complete` will be set true if this call returned due to completion of a frame.
+ * The `on_frame` and `on_payload` callbacks may each be invoked once as a result of this call.
+ * If an error occurs, the decoder is invalid forevermore.
+ */
+AWS_HTTP_API int aws_websocket_decoder_process(
+ struct aws_websocket_decoder *decoder,
+ struct aws_byte_cursor *data,
+ bool *frame_complete);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_WEBSOCKET_DECODER_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_encoder.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_encoder.h
new file mode 100644
index 00000000000..7fe4949bead
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_encoder.h
@@ -0,0 +1,57 @@
+#ifndef AWS_HTTP_WEBSOCKET_ENCODER_H
+#define AWS_HTTP_WEBSOCKET_ENCODER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/websocket_impl.h>
+
+typedef int(aws_websocket_encoder_payload_fn)(struct aws_byte_buf *out_buf, void *user_data);
+
+enum aws_websocket_encoder_state {
+ AWS_WEBSOCKET_ENCODER_STATE_INIT,
+ AWS_WEBSOCKET_ENCODER_STATE_OPCODE_BYTE,
+ AWS_WEBSOCKET_ENCODER_STATE_LENGTH_BYTE,
+ AWS_WEBSOCKET_ENCODER_STATE_EXTENDED_LENGTH,
+ AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY_CHECK,
+ AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY,
+ AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD_CHECK,
+ AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD,
+ AWS_WEBSOCKET_ENCODER_STATE_DONE,
+};
+
+struct aws_websocket_encoder {
+ enum aws_websocket_encoder_state state;
+ uint64_t state_bytes_processed;
+ struct aws_websocket_frame frame;
+ bool is_frame_in_progress;
+
+ /* True when the next data frame must be a CONTINUATION frame */
+ bool expecting_continuation_data_frame;
+
+ void *user_data;
+ aws_websocket_encoder_payload_fn *stream_outgoing_payload;
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API
+void aws_websocket_encoder_init(
+ struct aws_websocket_encoder *encoder,
+ aws_websocket_encoder_payload_fn *stream_outgoing_payload,
+ void *user_data);
+
+AWS_HTTP_API
+int aws_websocket_encoder_start_frame(struct aws_websocket_encoder *encoder, const struct aws_websocket_frame *frame);
+
+AWS_HTTP_API
+bool aws_websocket_encoder_is_frame_in_progress(const struct aws_websocket_encoder *encoder);
+
+AWS_HTTP_API
+int aws_websocket_encoder_process(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_WEBSOCKET_ENCODER_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_impl.h
new file mode 100644
index 00000000000..c807be2dac0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_impl.h
@@ -0,0 +1,115 @@
+#ifndef AWS_HTTP_WEBSOCKET_IMPL_H
+#define AWS_HTTP_WEBSOCKET_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/websocket.h>
+
+struct aws_http_client_connection_options;
+struct aws_http_connection;
+struct aws_http_make_request_options;
+
+/* RFC-6455 Section 5.2 Base Framing Protocol
+ * Payload length: 7 bits, 7+16 bits, or 7+64 bits
+ *
+ * The length of the "Payload data", in bytes: if 0-125, that is the
+ * payload length. If 126, the following 2 bytes interpreted as a
+ * 16-bit unsigned integer are the payload length. If 127, the
+ * following 8 bytes interpreted as a 64-bit unsigned integer (the
+ * most significant bit MUST be 0) are the payload length. Multibyte
+ * length quantities are expressed in network byte order. Note that
+ * in all cases, the minimal number of bytes MUST be used to encode
+ * the length, for example, the length of a 124-byte-long string
+ * can't be encoded as the sequence 126, 0, 124. The payload length
+ * is the length of the "Extension data" + the length of the
+ * "Application data". The length of the "Extension data" may be
+ * zero, in which case the payload length is the length of the
+ * "Application data".
+ */
+#define AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH 126
+#define AWS_WEBSOCKET_7BIT_VALUE_FOR_8BYTE_EXTENDED_LENGTH 127
+
+#define AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MIN_VALUE AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH
+#define AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MAX_VALUE 0x000000000000FFFF
+
+#define AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MIN_VALUE 0x0000000000010000
+#define AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE 0x7FFFFFFFFFFFFFFF
+
+/* Max bytes necessary to send non-payload parts of a frame */
+#define AWS_WEBSOCKET_MAX_FRAME_OVERHEAD (2 + 8 + 4) /* base + extended-length + masking-key */
+
+/**
+ * Full contents of a websocket frame, excluding the payload.
+ */
+struct aws_websocket_frame {
+ bool fin;
+ bool rsv[3];
+ bool masked;
+ uint8_t opcode;
+ uint64_t payload_length;
+ uint8_t masking_key[4];
+};
+
+struct aws_websocket_handler_options {
+ struct aws_allocator *allocator;
+ struct aws_channel *channel;
+ size_t initial_window_size;
+
+ void *user_data;
+ aws_websocket_on_incoming_frame_begin_fn *on_incoming_frame_begin;
+ aws_websocket_on_incoming_frame_payload_fn *on_incoming_frame_payload;
+ aws_websocket_on_incoming_frame_complete_fn *on_incoming_frame_complete;
+
+ bool is_server;
+ bool manual_window_update;
+};
+
+struct aws_websocket_client_bootstrap_system_vtable {
+ int (*aws_http_client_connect)(const struct aws_http_client_connection_options *options);
+ void (*aws_http_connection_release)(struct aws_http_connection *connection);
+ void (*aws_http_connection_close)(struct aws_http_connection *connection);
+ struct aws_channel *(*aws_http_connection_get_channel)(struct aws_http_connection *connection);
+ struct aws_http_stream *(*aws_http_connection_make_request)(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+ int (*aws_http_stream_activate)(struct aws_http_stream *stream);
+ void (*aws_http_stream_release)(struct aws_http_stream *stream);
+ struct aws_http_connection *(*aws_http_stream_get_connection)(const struct aws_http_stream *stream);
+ void (*aws_http_stream_update_window)(struct aws_http_stream *stream, size_t increment_size);
+ int (*aws_http_stream_get_incoming_response_status)(const struct aws_http_stream *stream, int *out_status);
+ struct aws_websocket *(*aws_websocket_handler_new)(const struct aws_websocket_handler_options *options);
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Returns printable name for opcode as c-string.
+ */
+AWS_HTTP_API
+const char *aws_websocket_opcode_str(uint8_t opcode);
+
+/**
+ * Return total number of bytes needed to encode frame and its payload
+ */
+AWS_HTTP_API
+uint64_t aws_websocket_frame_encoded_size(const struct aws_websocket_frame *frame);
+
+/**
+ * Create a websocket channel-handler and insert it into the channel.
+ */
+AWS_HTTP_API
+struct aws_websocket *aws_websocket_handler_new(const struct aws_websocket_handler_options *options);
+
+/**
+ * Override the functions that websocket bootstrap uses to interact with external systems.
+ * Used for unit testing.
+ */
+AWS_HTTP_API
+void aws_websocket_client_bootstrap_set_system_vtable(
+ const struct aws_websocket_client_bootstrap_system_vtable *system_vtable);
+
+AWS_EXTERN_C_END
+#endif /* AWS_HTTP_WEBSOCKET_IMPL_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/proxy.h b/contrib/restricted/aws/aws-c-http/include/aws/http/proxy.h
new file mode 100644
index 00000000000..cd4c92107df
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/proxy.h
@@ -0,0 +1,570 @@
+#ifndef AWS_PROXY_H
+#define AWS_PROXY_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/ref_count.h>
+#include <aws/http/http.h>
+#include <aws/http/request_response.h>
+#include <aws/http/status_code.h>
+
+struct aws_http_client_connection_options;
+struct aws_http_connection_manager_options;
+
+struct aws_http_message;
+struct aws_http_header;
+
+struct aws_http_proxy_config;
+struct aws_http_proxy_negotiator;
+struct aws_http_proxy_strategy;
+
+struct aws_socket_channel_bootstrap_options;
+
+/**
+ * @Deprecated - Supported proxy authentication modes. Superseded by proxy strategy.
+ */
+enum aws_http_proxy_authentication_type {
+ AWS_HPAT_NONE = 0,
+ AWS_HPAT_BASIC,
+};
+
+enum aws_http_proxy_env_var_type {
+ /**
+ * Default.
+ * Disable reading from environment variable for proxy.
+ */
+ AWS_HPEV_DISABLE = 0,
+ /**
+     * Enable getting the proxy URL from an environment variable when the manual proxy options of the connection manager are not set.
+     * env HTTPS_PROXY/https_proxy will be checked when the main connection uses TLS.
+     * env HTTP_PROXY/http_proxy will be checked when the main connection does NOT use TLS.
+ * The lower case version has precedence.
+ */
+ AWS_HPEV_ENABLE,
+};
+
+/**
+ * Supported proxy connection types
+ */
+enum aws_http_proxy_connection_type {
+ /**
+ * Deprecated, but 0-valued for backwards compatibility
+ *
+ * If tls options are provided (for the main connection) then treat the proxy as a tunneling proxy
+ * If tls options are not provided (for the main connection), then treat the proxy as a forwarding proxy
+ */
+ AWS_HPCT_HTTP_LEGACY = 0,
+
+ /**
+ * Use the proxy to forward http requests. Attempting to use both this mode and TLS on the tunnel destination
+ * is a configuration error.
+ */
+ AWS_HPCT_HTTP_FORWARD,
+
+ /**
+ * Use the proxy to establish a connection to a remote endpoint via a CONNECT request through the proxy.
+ * Works for both plaintext and tls connections.
+ */
+ AWS_HPCT_HTTP_TUNNEL,
+};
+
+/*
+ * Configuration for using proxy from environment variable.
+ * Zero out as default settings.
+ */
+struct proxy_env_var_settings {
+ enum aws_http_proxy_env_var_type env_var_type;
+ /*
+ * Optional.
+ * If not set:
+ * If tls options are provided (for the main connection) use tunnel proxy type
+ * If tls options are not provided (for the main connection) use forward proxy type
+ */
+ enum aws_http_proxy_connection_type connection_type;
+ /*
+ * Optional.
+     * If not set, a default TLS option will be created when HTTPS is used for the local-to-proxy connection.
+     * Must be distinct from the tls_connection_options from aws_http_connection_manager_options
+ */
+ const struct aws_tls_connection_options *tls_options;
+};
+
+struct aws_http_proxy_strategy;
+
+/**
+ * Options for http proxy server usage
+ */
+struct aws_http_proxy_options {
+
+ /**
+ * Type of proxy connection to make
+ */
+ enum aws_http_proxy_connection_type connection_type;
+
+ /**
+ * Proxy host to connect to
+ */
+ struct aws_byte_cursor host;
+
+ /**
+ * Port to make the proxy connection to
+ */
+ uint16_t port;
+
+ /**
+ * Optional.
+ * TLS configuration for the Local <-> Proxy connection
+     * Must be distinct from the TLS options in the parent aws_http_connection_options struct
+ */
+ const struct aws_tls_connection_options *tls_options;
+
+ /**
+ * Optional
+ * Advanced option that allows the user to create a custom strategy that gives low-level control of
+ * certain logical flows within the proxy logic.
+ *
+ * For tunneling proxies it allows custom retry and adaptive negotiation of CONNECT requests.
+ * For forwarding proxies it allows custom request transformations.
+ */
+ struct aws_http_proxy_strategy *proxy_strategy;
+
+ /**
+ * @Deprecated - What type of proxy authentication to use, if any.
+ * Replaced by instantiating a proxy_strategy
+ */
+ enum aws_http_proxy_authentication_type auth_type;
+
+ /**
+ * @Deprecated - Optional user name to use for basic authentication
+ * Replaced by instantiating a proxy_strategy via aws_http_proxy_strategy_new_basic_auth()
+ */
+ struct aws_byte_cursor auth_username;
+
+ /**
+ * @Deprecated - Optional password to use for basic authentication
+ * Replaced by instantiating a proxy_strategy via aws_http_proxy_strategy_new_basic_auth()
+ */
+ struct aws_byte_cursor auth_password;
+};
+
+/**
+ * Synchronous (for now) callback function to fetch a token used in modifying CONNECT requests
+ */
+typedef struct aws_string *(aws_http_proxy_negotiation_get_token_sync_fn)(void *user_data, int *out_error_code);
+
+/**
+ * Synchronous (for now) callback function to fetch a token used in modifying CONNECT request. Includes a (byte string)
+ * context intended to be used as part of a challenge-response flow.
+ */
+typedef struct aws_string *(aws_http_proxy_negotiation_get_challenge_token_sync_fn)(
+ void *user_data,
+ const struct aws_byte_cursor *challenge_context,
+ int *out_error_code);
+
+/**
+ * Proxy negotiation logic must call this function to indicate an unsuccessful outcome
+ */
+typedef void(aws_http_proxy_negotiation_terminate_fn)(
+ struct aws_http_message *message,
+ int error_code,
+ void *internal_proxy_user_data);
+
+/**
+ * Proxy negotiation logic must call this function to forward the potentially-mutated request back to the proxy
+ * connection logic.
+ */
+typedef void(aws_http_proxy_negotiation_http_request_forward_fn)(
+ struct aws_http_message *message,
+ void *internal_proxy_user_data);
+
+/**
+ * User-supplied transform callback which implements the proxy request flow and ultimately, across all execution
+ * pathways, invokes either the terminate function or the forward function appropriately.
+ *
+ * For tunneling proxy connections, this request flow transform only applies to the CONNECT stage of proxy
+ * connection establishment.
+ *
+ * For forwarding proxy connections, this request flow transform applies to every single http request that goes
+ * out on the connection.
+ *
+ * Forwarding proxy connections cannot yet support a truly async request transform without major surgery on http
+ * stream creation, so for now, we split into an async version (for tunneling proxies) and a separate
+ * synchronous version for forwarding proxies. Also forwarding proxies are a kind of legacy dead-end in some
+ * sense.
+ *
+ */
+typedef void(aws_http_proxy_negotiation_http_request_transform_async_fn)(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message,
+ aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+ aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+ void *internal_proxy_user_data);
+
+typedef int(aws_http_proxy_negotiation_http_request_transform_fn)(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message);
+
+/**
+ * Tunneling proxy connections only. A callback that lets the negotiator examine the headers in the
+ * response to the most recent CONNECT request as they arrive.
+ */
+typedef int(aws_http_proxy_negotiation_connect_on_incoming_headers_fn)(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers);
+
+/**
+ * Tunneling proxy connections only. A callback that lets the negotiator examine the status code of the
+ * response to the most recent CONNECT request.
+ */
+typedef int(aws_http_proxy_negotiator_connect_status_fn)(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_status_code status_code);
+
+/**
+ * Tunneling proxy connections only. A callback that lets the negotiator examine the body of the response
+ * to the most recent CONNECT request.
+ */
+typedef int(aws_http_proxy_negotiator_connect_on_incoming_body_fn)(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ const struct aws_byte_cursor *data);
+
+/*
+ * Control value that lets the http proxy implementation know if and how to retry a CONNECT request based on
+ * the proxy negotiator's state.
+ */
+enum aws_http_proxy_negotiation_retry_directive {
+ /*
+ * Stop trying to connect through the proxy and give up.
+ */
+ AWS_HPNRD_STOP,
+
+ /*
+ * Establish a new connection to the proxy before making the next CONNECT request
+ */
+ AWS_HPNRD_NEW_CONNECTION,
+
+ /*
+ * Reuse the existing connection to make the next CONNECT request
+ */
+ AWS_HPNRD_CURRENT_CONNECTION,
+};
+
+typedef enum aws_http_proxy_negotiation_retry_directive(aws_http_proxy_negotiator_get_retry_directive_fn)(
+ struct aws_http_proxy_negotiator *proxy_negotiator);
+
+/**
+ * Vtable for forwarding-based proxy negotiators
+ */
+struct aws_http_proxy_negotiator_forwarding_vtable {
+ aws_http_proxy_negotiation_http_request_transform_fn *forward_request_transform;
+};
+
+/**
+ * Vtable for tunneling-based proxy negotiators
+ */
+struct aws_http_proxy_negotiator_tunnelling_vtable {
+ aws_http_proxy_negotiation_http_request_transform_async_fn *connect_request_transform;
+
+ aws_http_proxy_negotiation_connect_on_incoming_headers_fn *on_incoming_headers_callback;
+ aws_http_proxy_negotiator_connect_status_fn *on_status_callback;
+ aws_http_proxy_negotiator_connect_on_incoming_body_fn *on_incoming_body_callback;
+
+ aws_http_proxy_negotiator_get_retry_directive_fn *get_retry_directive;
+};
+
+/*
+ * Base definition of a proxy negotiator.
+ *
+ * A negotiator works differently based on what kind of proxy connection is being asked for:
+ *
+ * (1) Tunneling - In a tunneling proxy connection, the connect_request_transform is invoked on every CONNECT request.
+ * The connect_request_transform implementation *MUST*, in turn, eventually call one of the terminate or forward
+ * functions it gets supplied with.
+ *
+ * Every CONNECT request, if a response is obtained, will properly invoke the response handling callbacks supplied
+ * in the tunneling vtable.
+ *
+ * (2) Forwarding - In a forwarding proxy connection, the forward_request_transform is invoked on every request sent out
+ * on the connection.
+ */
+struct aws_http_proxy_negotiator {
+ struct aws_ref_count ref_count;
+
+ void *impl;
+
+ union {
+ struct aws_http_proxy_negotiator_forwarding_vtable *forwarding_vtable;
+ struct aws_http_proxy_negotiator_tunnelling_vtable *tunnelling_vtable;
+ } strategy_vtable;
+};
+
+/*********************************************************************************************/
+
+typedef struct aws_http_proxy_negotiator *(aws_http_proxy_strategy_create_negotiator_fn)(
+ struct aws_http_proxy_strategy *proxy_strategy,
+ struct aws_allocator *allocator);
+
+struct aws_http_proxy_strategy_vtable {
+ aws_http_proxy_strategy_create_negotiator_fn *create_negotiator;
+};
+
+struct aws_http_proxy_strategy {
+ struct aws_ref_count ref_count;
+ struct aws_http_proxy_strategy_vtable *vtable;
+ void *impl;
+ enum aws_http_proxy_connection_type proxy_connection_type;
+};
+
+/*
+ * Options necessary to create a basic authentication proxy strategy
+ */
+struct aws_http_proxy_strategy_basic_auth_options {
+
+ /* type of proxy connection being established, must be forwarding or tunnel */
+ enum aws_http_proxy_connection_type proxy_connection_type;
+
+ /* user name to use in basic authentication */
+ struct aws_byte_cursor user_name;
+
+ /* password to use in basic authentication */
+ struct aws_byte_cursor password;
+};
+
+/*
+ * Options necessary to create a (synchronous) kerberos authentication proxy strategy
+ */
+struct aws_http_proxy_strategy_tunneling_kerberos_options {
+
+ aws_http_proxy_negotiation_get_token_sync_fn *get_token;
+
+ void *get_token_user_data;
+};
+
+/*
+ * Options necessary to create a (synchronous) ntlm authentication proxy strategy
+ */
+struct aws_http_proxy_strategy_tunneling_ntlm_options {
+
+ aws_http_proxy_negotiation_get_token_sync_fn *get_token;
+
+ aws_http_proxy_negotiation_get_challenge_token_sync_fn *get_challenge_token;
+
+ void *get_challenge_token_user_data;
+};
+
+/*
+ * Options necessary to create an adaptive sequential strategy that tries one or more of kerberos and ntlm (in that
+ * order, if both are active). If an options struct is NULL, then that strategy will not be used.
+ */
+struct aws_http_proxy_strategy_tunneling_adaptive_options {
+ /*
+ * If non-null, will insert a kerberos proxy strategy into the adaptive sequence
+ */
+ struct aws_http_proxy_strategy_tunneling_kerberos_options *kerberos_options;
+
+ /*
+ * If non-null will insert an ntlm proxy strategy into the adaptive sequence
+ */
+ struct aws_http_proxy_strategy_tunneling_ntlm_options *ntlm_options;
+};
+
+/*
+ * Options necessary to create a sequential proxy strategy.
+ */
+struct aws_http_proxy_strategy_tunneling_sequence_options {
+ struct aws_http_proxy_strategy **strategies;
+
+ uint32_t strategy_count;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Take a reference to an http proxy negotiator
+ * @param proxy_negotiator negotiator to take a reference to
+ * @return the negotiator
+ */
+AWS_HTTP_API
+struct aws_http_proxy_negotiator *aws_http_proxy_negotiator_acquire(struct aws_http_proxy_negotiator *proxy_negotiator);
+
+/**
+ * Release a reference to an http proxy negotiator
+ * @param proxy_negotiator negotiator to release a reference to
+ */
+AWS_HTTP_API
+void aws_http_proxy_negotiator_release(struct aws_http_proxy_negotiator *proxy_negotiator);
+
+/**
+ * Creates a new proxy negotiator from a proxy strategy
+ * @param allocator memory allocator to use
+ * @param strategy strategy to create a new negotiator for
+ * @return a new proxy negotiator if successful, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_negotiator *aws_http_proxy_strategy_create_negotiator(
+ struct aws_http_proxy_strategy *strategy,
+ struct aws_allocator *allocator);
+
+/**
+ * Take a reference to an http proxy strategy
+ * @param proxy_strategy strategy to take a reference to
+ * @return the strategy
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_acquire(struct aws_http_proxy_strategy *proxy_strategy);
+
+/**
+ * Release a reference to an http proxy strategy
+ * @param proxy_strategy strategy to release a reference to
+ */
+AWS_HTTP_API
+void aws_http_proxy_strategy_release(struct aws_http_proxy_strategy *proxy_strategy);
+
+/**
+ * A constructor for a proxy strategy that performs basic authentication by adding the appropriate
+ * header and header value to requests or CONNECT requests.
+ *
+ * @param allocator memory allocator to use
+ * @param config basic authentication configuration info
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_basic_auth(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_basic_auth_options *config);
+
+/**
+ * Constructor for an adaptive tunneling proxy strategy. This strategy attempts a vanilla CONNECT and if that
+ * fails it may make followup CONNECT attempts using kerberos or ntlm tokens, based on configuration and proxy
+ * response properties.
+ *
+ * @param allocator memory allocator to use
+ * @param config configuration options for the strategy
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_adaptive(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_tunneling_adaptive_options *config);
+
+/*
+ * aws_http_proxy_config is the persistent, memory-managed version of aws_http_proxy_options
+ *
+ * This is a set of APIs for creating, destroying and converting between them
+ */
+
+/**
+ * Create a persistent proxy configuration from http connection options
+ * @param allocator memory allocator to use
+ * @param options http connection options to source proxy configuration from
+ * @return
+ */
+AWS_HTTP_API
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_connection_options(
+ struct aws_allocator *allocator,
+ const struct aws_http_client_connection_options *options);
+
+/**
+ * Create a persistent proxy configuration from http connection manager options
+ * @param allocator memory allocator to use
+ * @param options http connection manager options to source proxy configuration from
+ * @return
+ */
+AWS_HTTP_API
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_manager_options(
+ struct aws_allocator *allocator,
+ const struct aws_http_connection_manager_options *options);
+
+/**
+ * Create a persistent proxy configuration from non-persistent proxy options. The resulting
+ * proxy configuration assumes a tunneling connection type.
+ *
+ * @param allocator memory allocator to use
+ * @param options http proxy options to source proxy configuration from
+ * @return
+ */
+AWS_HTTP_API
+struct aws_http_proxy_config *aws_http_proxy_config_new_tunneling_from_proxy_options(
+ struct aws_allocator *allocator,
+ const struct aws_http_proxy_options *options);
+
+/**
+ * Create a persistent proxy configuration from non-persistent proxy options.
+ * Legacy connection type of proxy options will be rejected.
+ *
+ * @param allocator memory allocator to use
+ * @param options http proxy options to source proxy configuration from
+ * @return
+ */
+AWS_HTTP_API
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_proxy_options(
+ struct aws_allocator *allocator,
+ const struct aws_http_proxy_options *options);
+
+/**
+ * Create a persistent proxy configuration from non-persistent proxy options.
+ *
+ * @param allocator memory allocator to use
+ * @param proxy_options http proxy options to source proxy configuration from
+ * @param is_tls_connection tls connection info of the main connection to determine connection_type
+ * when the connection_type is legacy.
+ * @return
+ */
+AWS_HTTP_API
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_proxy_options_with_tls_info(
+ struct aws_allocator *allocator,
+ const struct aws_http_proxy_options *proxy_options,
+ bool is_tls_connection);
+
+/**
+ * Clones an existing proxy configuration. A refactor could remove this (do a "move" between the old and new user
+ * data in the one spot it's used) but that should wait until we have better test cases for the logic where this
+ * gets invoked (ntlm/kerberos chains).
+ *
+ * @param allocator memory allocator to use
+ * @param proxy_config http proxy configuration to clone
+ * @return
+ */
+AWS_HTTP_API
+struct aws_http_proxy_config *aws_http_proxy_config_new_clone(
+ struct aws_allocator *allocator,
+ const struct aws_http_proxy_config *proxy_config);
+
+/**
+ * Destroys an http proxy configuration
+ * @param config http proxy configuration to destroy
+ */
+AWS_HTTP_API
+void aws_http_proxy_config_destroy(struct aws_http_proxy_config *config);
+
+/**
+ * Initializes non-persistent http proxy options from a persistent http proxy configuration
+ * @param options http proxy options to initialize
+ * @param config the http proxy config to use as an initialization source
+ */
+AWS_HTTP_API
+void aws_http_proxy_options_init_from_config(
+ struct aws_http_proxy_options *options,
+ const struct aws_http_proxy_config *config);
+
+/**
+ * Establish an arbitrary protocol connection through an http proxy via tunneling CONNECT. Alpn is
+ * not required for this connection process to succeed, but we encourage its use if available.
+ *
+ * @param channel_options configuration options for the socket level connection
+ * @param proxy_options configuration options for the proxy connection
+ *
+ * @return AWS_OP_SUCCESS if the asynchronous channel kickoff succeeded, AWS_OP_ERR otherwise
+ */
+AWS_HTTP_API int aws_http_proxy_new_socket_channel(
+ struct aws_socket_channel_bootstrap_options *channel_options,
+ const struct aws_http_proxy_options *proxy_options);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_PROXY_STRATEGY_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/request_response.h b/contrib/restricted/aws/aws-c-http/include/aws/http/request_response.h
new file mode 100644
index 00000000000..a4ff6da9477
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/request_response.h
@@ -0,0 +1,1072 @@
+#ifndef AWS_HTTP_REQUEST_RESPONSE_H
+#define AWS_HTTP_REQUEST_RESPONSE_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+struct aws_http_connection;
+struct aws_input_stream;
+
+/**
+ * A stream exists for the duration of a request/response exchange.
+ * A client creates a stream to send a request and receive a response.
+ * A server creates a stream to receive a request and send a response.
+ * In http/2, a push-promise stream can be sent by a server and received by a client.
+ */
+struct aws_http_stream;
+
+/**
+ * Controls whether a header's strings may be compressed by encoding the index of
+ * strings in a cache, rather than encoding the literal string.
+ *
+ * This setting has no effect on HTTP/1.x connections.
+ * On HTTP/2 connections this controls HPACK behavior.
+ * See RFC-7541 Section 7.1 for security considerations.
+ */
+enum aws_http_header_compression {
+ /**
+ * Compress header by encoding the cached index of its strings,
+ * or by updating the cache to contain these strings for future reference.
+ * Best for headers that are sent repeatedly.
+ * This is the default setting.
+ */
+ AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
+
+ /**
+ * Encode header strings literally.
+ * If an intermediary re-broadcasts the headers, it is permitted to use cache.
+ * Best for unique headers that are unlikely to repeat.
+ */
+ AWS_HTTP_HEADER_COMPRESSION_NO_CACHE,
+
+ /**
+ * Encode header strings literally and forbid all intermediaries from using
+ * cache when re-broadcasting.
+ * Best for header fields that are highly valuable or sensitive to recovery.
+ */
+ AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE,
+};
+
+/**
+ * A lightweight HTTP header struct.
+ * Note that the underlying strings are not owned by the byte cursors.
+ */
+struct aws_http_header {
+ struct aws_byte_cursor name;
+ struct aws_byte_cursor value;
+
+ /* Controls whether the header's strings may be compressed via caching. */
+ enum aws_http_header_compression compression;
+};
+
+/**
+ * A transformable block of HTTP headers.
+ * Provides a nice API for getting/setting header names and values.
+ *
+ * All strings are copied and stored within this datastructure.
+ * The index of a given header may change any time headers are modified.
+ * When iterating headers, the following ordering rules apply:
+ *
+ * - Headers with the same name will always be in the same order, relative to one another.
+ * If "A: one" is added before "A: two", then "A: one" will always precede "A: two".
+ *
+ * - Headers with different names could be in any order, relative to one another.
+ * If "A: one" is seen before "B: bee" in one iteration, you might see "B: bee" before "A: one" on the next.
+ */
+struct aws_http_headers;
+
+/**
+ * Header block type.
+ * INFORMATIONAL: Header block for 1xx informational (interim) responses.
+ * MAIN: Main header block sent with request or response.
+ * TRAILING: Headers sent after the body of a request or response.
+ */
+enum aws_http_header_block {
+ AWS_HTTP_HEADER_BLOCK_MAIN,
+ AWS_HTTP_HEADER_BLOCK_INFORMATIONAL,
+ AWS_HTTP_HEADER_BLOCK_TRAILING,
+};
+
+/**
+ * The definition for an outgoing HTTP request or response.
+ * The message may be transformed (ex: signing the request) before its data is eventually sent.
+ *
+ * The message keeps internal copies of its trivial strings (method, path, headers)
+ * but does NOT take ownership of its body stream.
+ *
+ * A language binding would likely present this as an HttpMessage base class with
+ * HttpRequest and HttpResponse subclasses.
+ */
+struct aws_http_message;
+
+/**
+ * Function to invoke when a message transformation completes.
+ * This function MUST be invoked or the application will soft-lock.
+ * `message` and `complete_ctx` must be the same pointers provided to the `aws_http_message_transform_fn`.
+ * `error_code` should be AWS_ERROR_SUCCESS if transformation was successful,
+ * otherwise pass a different AWS_ERROR_X value.
+ */
+typedef void(
+ aws_http_message_transform_complete_fn)(struct aws_http_message *message, int error_code, void *complete_ctx);
+
+/**
+ * A function that may modify a request or response before it is sent.
+ * The transformation may be asynchronous or immediate.
+ * The user MUST invoke the `complete_fn` when transformation is complete or the application will soft-lock.
+ * When invoking the `complete_fn`, pass along the `message` and `complete_ctx` provided here and an error code.
+ * The error code should be AWS_ERROR_SUCCESS if transformation was successful,
+ * otherwise pass a different AWS_ERROR_X value.
+ */
+typedef void(aws_http_message_transform_fn)(
+ struct aws_http_message *message,
+ void *user_data,
+ aws_http_message_transform_complete_fn *complete_fn,
+ void *complete_ctx);
+
+/**
+ * Invoked repeatedly as headers are received.
+ * At this point, aws_http_stream_get_incoming_response_status() can be called for the client.
+ * And aws_http_stream_get_incoming_request_method() and aws_http_stream_get_incoming_request_uri() can be called for
+ * the server.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * Return AWS_OP_SUCCESS to continue processing the stream.
+ * Return AWS_OP_ERR to indicate failure and cancel the stream.
+ */
+typedef int(aws_http_on_incoming_headers_fn)(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers,
+ void *user_data);
+
+/**
+ * Invoked when the incoming header block of this type(informational/main/trailing) has been completely read.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * Return AWS_OP_SUCCESS to continue processing the stream.
+ * Return AWS_OP_ERR to indicate failure and cancel the stream.
+ */
+typedef int(aws_http_on_incoming_header_block_done_fn)(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ void *user_data);
+
+/**
+ * Called repeatedly as body data is received.
+ * The data must be copied immediately if you wish to preserve it.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * Note that, if the connection is using manual_window_management then the window
+ * size has shrunk by the amount of body data received. If the window size
+ * reaches 0 no further data will be received. Increment the window size with
+ * aws_http_stream_update_window().
+ *
+ * Return AWS_OP_SUCCESS to continue processing the stream.
+ * Return AWS_OP_ERR to indicate failure and cancel the stream.
+ */
+typedef int(
+ aws_http_on_incoming_body_fn)(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data);
+
+/**
+ * Invoked when request has been completely read.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * Return AWS_OP_SUCCESS to continue processing the stream.
+ * Return AWS_OP_ERR to indicate failure and cancel the stream.
+ */
+typedef int(aws_http_on_incoming_request_done_fn)(struct aws_http_stream *stream, void *user_data);
+
+/**
+ * Invoked when request/response stream is completely destroyed.
+ * This may be invoked synchronously when aws_http_stream_release() is called.
+ * This is invoked even if the stream is never activated.
+ */
+typedef void(aws_http_on_stream_complete_fn)(struct aws_http_stream *stream, int error_code, void *user_data);
+
+/**
+ * Invoked when the request/response stream is completely destroyed.
+ * This can be invoked within the same thread that releases the refcount on the http stream.
+ */
+typedef void(aws_http_on_stream_destroy_fn)(void *user_data);
+
+/**
+ * Options for creating a stream which sends a request from the client and receives a response from the server.
+ */
+struct aws_http_make_request_options {
+ /**
+ * The sizeof() this struct, used for versioning.
+ * Required.
+ */
+ size_t self_size;
+
+ /**
+ * Definition for outgoing request.
+ * Required.
+ * The request will be kept alive via refcounting until the request completes.
+ */
+ struct aws_http_message *request;
+
+ void *user_data;
+
+ /**
+ * Invoked repeatedly as headers are received.
+ * Optional.
+ * See `aws_http_on_incoming_headers_fn`.
+ */
+ aws_http_on_incoming_headers_fn *on_response_headers;
+
+ /**
+ * Invoked when response header block has been completely read.
+ * Optional.
+ * See `aws_http_on_incoming_header_block_done_fn`.
+ */
+ aws_http_on_incoming_header_block_done_fn *on_response_header_block_done;
+
+ /**
+ * Invoked repeatedly as body data is received.
+ * Optional.
+ * See `aws_http_on_incoming_body_fn`.
+ */
+ aws_http_on_incoming_body_fn *on_response_body;
+
+ /**
+ * Invoked when request/response stream is complete, whether successful or unsuccessful
+ * Optional.
+ * See `aws_http_on_stream_complete_fn`.
+ */
+ aws_http_on_stream_complete_fn *on_complete;
+
+ /* Callback for when the request/response stream is completely destroyed. */
+ aws_http_on_stream_destroy_fn *on_destroy;
+
+ /**
+ * When using HTTP/2, request body data will be provided over time. The stream will only be polled for writing
+ * when data has been supplied via `aws_http2_stream_write_data`
+ */
+ bool http2_use_manual_data_writes;
+};
+
+struct aws_http_request_handler_options {
+ /* Set to sizeof() this struct, used for versioning. */
+ size_t self_size;
+
+ /**
+ * Required.
+ */
+ struct aws_http_connection *server_connection;
+
+ /**
+ * user_data passed to callbacks.
+ * Optional.
+ */
+ void *user_data;
+
+ /**
+ * Invoked repeatedly as headers are received.
+ * Optional.
+ * See `aws_http_on_incoming_headers_fn`.
+ */
+ aws_http_on_incoming_headers_fn *on_request_headers;
+
+ /**
+ * Invoked when the request header block has been completely read.
+ * Optional.
+ * See `aws_http_on_incoming_header_block_done_fn`.
+ */
+ aws_http_on_incoming_header_block_done_fn *on_request_header_block_done;
+
+ /**
+ * Invoked as body data is received.
+ * Optional.
+ * See `aws_http_on_incoming_body_fn`.
+ */
+ aws_http_on_incoming_body_fn *on_request_body;
+
+ /**
+ * Invoked when request has been completely read.
+ * Optional.
+ * See `aws_http_on_incoming_request_done_fn`.
+ */
+ aws_http_on_incoming_request_done_fn *on_request_done;
+
+ /**
+ * Invoked when request/response stream is complete, whether successful or unsuccessful
+ * Optional.
+ * See `aws_http_on_stream_complete_fn`.
+ */
+ aws_http_on_stream_complete_fn *on_complete;
+
+ /* Callback for when the request/response stream is completely destroyed. */
+ aws_http_on_stream_destroy_fn *on_destroy;
+};
+
+/**
+ * Invoked when the data stream of an outgoing HTTP write operation is no longer in use.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * @param stream HTTP-stream this write operation was submitted to.
+ * @param error_code If error_code is AWS_ERROR_SUCCESS (0), the data was successfully sent.
+ * Any other error_code indicates that the HTTP-stream is in the process of terminating.
+ * If the error_code is AWS_ERROR_HTTP_STREAM_HAS_COMPLETED,
+ * the stream's termination has nothing to do with this write operation.
+ * Any other non-zero error code indicates a problem with this particular write
+ * operation's data.
+ * @param user_data User data for this write operation.
+ */
+typedef void aws_http_stream_write_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data);
+
+/**
+ * Invoked when the data of an outgoing HTTP/1.1 chunk is no longer in use.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * @param stream HTTP-stream this chunk was submitted to.
+ * @param error_code If error_code is AWS_ERROR_SUCCESS (0), the data was successfully sent.
+ * Any other error_code indicates that the HTTP-stream is in the process of terminating.
+ * If the error_code is AWS_ERROR_HTTP_STREAM_HAS_COMPLETED,
+ * the stream's termination has nothing to do with this chunk.
+ * Any other non-zero error code indicates a problem with this particular chunk's data.
+ * @param user_data User data for this chunk.
+ */
+typedef aws_http_stream_write_complete_fn aws_http1_stream_write_chunk_complete_fn;
+
+/**
+ * HTTP/1.1 chunk extension for chunked encoding.
+ * Note that the underlying strings are not owned by the byte cursors.
+ */
+struct aws_http1_chunk_extension {
+ struct aws_byte_cursor key;
+ struct aws_byte_cursor value;
+};
+
+/**
+ * Encoding options for an HTTP/1.1 chunked transfer encoding chunk.
+ */
+struct aws_http1_chunk_options {
+ /*
+ * The data stream to be sent in a single chunk.
+ * The aws_input_stream must remain valid until on_complete is invoked.
+ * May be NULL in the final chunk with size 0.
+ *
+ * Note that, for Transfer-Encodings other than "chunked", the data is
+ * expected to already have that encoding applied. For example, if
+ * "Transfer-Encoding: gzip, chunked" then the data from aws_input_stream
+ * should already be in gzip format.
+ */
+ struct aws_input_stream *chunk_data;
+
+ /*
+ * Size of the chunk_data input stream in bytes.
+ */
+ uint64_t chunk_data_size;
+
+ /**
+ * A pointer to an array of chunked extensions.
+ * The num_extensions must match the length of the array.
+ * This data is deep-copied by aws_http1_stream_write_chunk(),
+ * it does not need to remain valid until on_complete is invoked.
+ */
+ struct aws_http1_chunk_extension *extensions;
+
+ /**
+ * The number of elements defined in the extensions array.
+ */
+ size_t num_extensions;
+
+ /**
+ * Invoked when the chunk data is no longer in use, whether or not it was successfully sent.
+ * Optional.
+ * See `aws_http1_stream_write_chunk_complete_fn`.
+ */
+ aws_http1_stream_write_chunk_complete_fn *on_complete;
+
+ /**
+ * User provided data passed to the on_complete callback on its invocation.
+ */
+ void *user_data;
+};
+
+/**
+ * Invoked when the data of an outgoing HTTP2 data frame is no longer in use.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * @param stream HTTP2-stream this write was submitted to.
+ * @param error_code If error_code is AWS_ERROR_SUCCESS (0), the data was successfully sent.
+ * Any other error_code indicates that the HTTP-stream is in the process of terminating.
+ * If the error_code is AWS_ERROR_HTTP_STREAM_HAS_COMPLETED,
+ * the stream's termination has nothing to do with this write.
+ * Any other non-zero error code indicates a problem with this particular write's data.
+ * @param user_data User data for this write.
+ */
+typedef aws_http_stream_write_complete_fn aws_http2_stream_write_data_complete_fn;
+
+/**
+ * Encoding options for manual H2 data frame writes
+ */
+struct aws_http2_stream_write_data_options {
+ /**
+ * The data to be sent.
+ * Optional.
+ * If not set, input stream with length 0 will be used.
+ */
+ struct aws_input_stream *data;
+
+ /**
+ * Set true when it's the last chunk to be sent.
+ * After a write with end_stream, no more data write will be accepted.
+ */
+ bool end_stream;
+
+ /**
+ * Invoked when the data stream is no longer in use, whether or not it was successfully sent.
+ * Optional.
+ * See `aws_http2_stream_write_data_complete_fn`.
+ */
+ aws_http2_stream_write_data_complete_fn *on_complete;
+
+ /**
+ * User provided data passed to the on_complete callback on its invocation.
+ */
+ void *user_data;
+};
+
+#define AWS_HTTP_REQUEST_HANDLER_OPTIONS_INIT \
+ { .self_size = sizeof(struct aws_http_request_handler_options), }
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Return whether both names are equivalent.
+ * This is a case-insensitive string comparison.
+ *
+ * Example Matches:
+ * "Content-Length" == "content-length" // upper or lower case ok
+ *
+ * Example Mismatches:
+ * "Content-Length" != " Content-Length" // leading whitespace bad
+ */
+AWS_HTTP_API
+bool aws_http_header_name_eq(struct aws_byte_cursor name_a, struct aws_byte_cursor name_b);
+
+/**
+ * Create a new headers object.
+ * The caller has a hold on the object and must call aws_http_headers_release() when they are done with it.
+ */
+AWS_HTTP_API
+struct aws_http_headers *aws_http_headers_new(struct aws_allocator *allocator);
+
+/**
+ * Acquire a hold on the object, preventing it from being deleted until
+ * aws_http_headers_release() is called by all those with a hold on it.
+ */
+AWS_HTTP_API
+void aws_http_headers_acquire(struct aws_http_headers *headers);
+
+/**
+ * Release a hold on the object.
+ * The object is deleted when all holds on it are released.
+ */
+AWS_HTTP_API
+void aws_http_headers_release(struct aws_http_headers *headers);
+
+/**
+ * Add a header.
+ * The underlying strings are copied.
+ */
+AWS_HTTP_API
+int aws_http_headers_add_header(struct aws_http_headers *headers, const struct aws_http_header *header);
+
+/**
+ * Add a header.
+ * The underlying strings are copied.
+ */
+AWS_HTTP_API
+int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value);
+
+/**
+ * Add an array of headers.
+ * The underlying strings are copied.
+ */
+AWS_HTTP_API
+int aws_http_headers_add_array(struct aws_http_headers *headers, const struct aws_http_header *array, size_t count);
+
+/**
+ * Set a header value.
+ * The header is added if necessary and any existing values for this name are removed.
+ * The underlying strings are copied.
+ */
+AWS_HTTP_API
+int aws_http_headers_set(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value);
+
+/**
+ * Get the total number of headers.
+ */
+AWS_HTTP_API
+size_t aws_http_headers_count(const struct aws_http_headers *headers);
+
+/**
+ * Get the header at the specified index.
+ * The index of a given header may change any time headers are modified.
+ * When iterating headers, the following ordering rules apply:
+ *
+ * - Headers with the same name will always be in the same order, relative to one another.
+ * If "A: one" is added before "A: two", then "A: one" will always precede "A: two".
+ *
+ * - Headers with different names could be in any order, relative to one another.
+ * If "A: one" is seen before "B: bee" in one iteration, you might see "B: bee" before "A: one" on the next.
+ *
+ * AWS_ERROR_INVALID_INDEX is raised if the index is invalid.
+ */
+AWS_HTTP_API
+int aws_http_headers_get_index(
+ const struct aws_http_headers *headers,
+ size_t index,
+ struct aws_http_header *out_header);
+
+/**
+ *
+ * Get all values with this name, combined into one new aws_string that you are responsible for destroying.
+ * If there are multiple headers with this name, their values are appended with comma-separators.
+ * If there are no headers with this name, NULL is returned and AWS_ERROR_HTTP_HEADER_NOT_FOUND is raised.
+ */
+AWS_HTTP_API
+struct aws_string *aws_http_headers_get_all(const struct aws_http_headers *headers, struct aws_byte_cursor name);
+
+/**
+ * Get the first value for this name, ignoring any additional values.
+ * AWS_ERROR_HTTP_HEADER_NOT_FOUND is raised if the name is not found.
+ */
+AWS_HTTP_API
+int aws_http_headers_get(
+ const struct aws_http_headers *headers,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor *out_value);
+
+/**
+ * Test if header name exists or not in headers
+ */
+AWS_HTTP_API
+bool aws_http_headers_has(const struct aws_http_headers *headers, struct aws_byte_cursor name);
+
+/**
+ * Remove all headers with this name.
+ * AWS_ERROR_HTTP_HEADER_NOT_FOUND is raised if no headers with this name are found.
+ */
+AWS_HTTP_API
+int aws_http_headers_erase(struct aws_http_headers *headers, struct aws_byte_cursor name);
+
+/**
+ * Remove the first header found with this name and value.
+ * AWS_ERROR_HTTP_HEADER_NOT_FOUND is raised if no such header is found.
+ */
+AWS_HTTP_API
+int aws_http_headers_erase_value(
+ struct aws_http_headers *headers,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value);
+
+/**
+ * Remove the header at the specified index.
+ *
+ * AWS_ERROR_INVALID_INDEX is raised if the index is invalid.
+ */
+AWS_HTTP_API
+int aws_http_headers_erase_index(struct aws_http_headers *headers, size_t index);
+
+/**
+ * Clear all headers.
+ */
+AWS_HTTP_API
+void aws_http_headers_clear(struct aws_http_headers *headers);
+
+/**
+ * Get the `:method` value (HTTP/2 headers only).
+ */
+AWS_HTTP_API
+int aws_http2_headers_get_request_method(const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_method);
+
+/**
+ * Set `:method` (HTTP/2 headers only).
+ * The headers makes its own copy of the underlying string.
+ */
+AWS_HTTP_API
+int aws_http2_headers_set_request_method(struct aws_http_headers *h2_headers, struct aws_byte_cursor method);
+
+/*
+ * Get the `:scheme` value (HTTP/2 headers only).
+ */
+AWS_HTTP_API
+int aws_http2_headers_get_request_scheme(const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_scheme);
+
+/**
+ * Set `:scheme` (request pseudo headers only).
+ * The pseudo headers makes its own copy of the underlying string.
+ */
+AWS_HTTP_API
+int aws_http2_headers_set_request_scheme(struct aws_http_headers *h2_headers, struct aws_byte_cursor scheme);
+
+/*
+ * Get the `:authority` value (request pseudo headers only).
+ */
+AWS_HTTP_API
+int aws_http2_headers_get_request_authority(
+ const struct aws_http_headers *h2_headers,
+ struct aws_byte_cursor *out_authority);
+
+/**
+ * Set `:authority` (request pseudo headers only).
+ * The pseudo headers makes its own copy of the underlying string.
+ */
+AWS_HTTP_API
+int aws_http2_headers_set_request_authority(struct aws_http_headers *h2_headers, struct aws_byte_cursor authority);
+
+/*
+ * Get the `:path` value (request pseudo headers only).
+ */
+AWS_HTTP_API
+int aws_http2_headers_get_request_path(const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_path);
+
+/**
+ * Set `:path` (request pseudo headers only).
+ * The pseudo headers makes its own copy of the underlying string.
+ */
+AWS_HTTP_API
+int aws_http2_headers_set_request_path(struct aws_http_headers *h2_headers, struct aws_byte_cursor path);
+
+/**
+ * Get `:status` (response pseudo headers only).
+ * If no status is set, AWS_ERROR_HTTP_DATA_NOT_AVAILABLE is raised.
+ */
+AWS_HTTP_API
+int aws_http2_headers_get_response_status(const struct aws_http_headers *h2_headers, int *out_status_code);
+
+/**
+ * Set `:status` (response pseudo headers only).
+ */
+AWS_HTTP_API
+int aws_http2_headers_set_response_status(struct aws_http_headers *h2_headers, int status_code);
+
+/**
+ * Create a new HTTP/1.1 request message.
+ * The message is blank, all properties (method, path, etc) must be set individually.
+ * If HTTP/1.1 message used in HTTP/2 connection, the transformation will be automatically applied.
+ * An HTTP/2 message will be created and sent based on the HTTP/1.1 message.
+ *
+ * The caller has a hold on the object and must call aws_http_message_release() when they are done with it.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http_message_new_request(struct aws_allocator *allocator);
+
+/**
+ * Like aws_http_message_new_request(), but uses existing aws_http_headers instead of creating a new one.
+ * Acquires a hold on the headers, and releases it when the request is destroyed.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http_message_new_request_with_headers(
+ struct aws_allocator *allocator,
+ struct aws_http_headers *existing_headers);
+
+/**
+ * Create a new HTTP/1.1 response message.
+ * The message is blank, all properties (status, headers, etc) must be set individually.
+ *
+ * The caller has a hold on the object and must call aws_http_message_release() when they are done with it.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http_message_new_response(struct aws_allocator *allocator);
+
+/**
+ * Create a new HTTP/2 request message.
+ * pseudo headers need to be set from aws_http2_headers_set_request_* to the headers of the aws_http_message.
+ * Using it on an HTTP/1.1 connection will result in an error.
+ *
+ * The caller has a hold on the object and must call aws_http_message_release() when they are done with it.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http2_message_new_request(struct aws_allocator *allocator);
+
+/**
+ * Create a new HTTP/2 response message.
+ * Pseudo headers need to be set via aws_http2_headers_set_response_status on the headers of the aws_http_message.
+ * Sending it over an HTTP/1.1 connection will result in an error.
+ *
+ * The caller has a hold on the object and must call aws_http_message_release() when they are done with it.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http2_message_new_response(struct aws_allocator *allocator);
+
+/**
+ * Create an HTTP/2 message from HTTP/1.1 message.
+ * pseudo headers will be created from the context and added to the headers of new message.
+ * Normal headers will be copied to the headers of new message.
+ * Note:
+ * - if a `host` header exists, it will be removed and `:authority` will be added using its value.
+ * - `:scheme` always defaults to "https". To use a different scheme create the HTTP/2 message directly
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http2_message_new_from_http1(
+ struct aws_allocator *alloc,
+ const struct aws_http_message *http1_msg);
+
+/**
+ * Acquire a hold on the object, preventing it from being deleted until
+ * aws_http_message_release() is called by all those with a hold on it.
+ *
+ * This function returns the passed in message (possibly NULL) so that acquire-and-assign can be done with a single
+ * statement.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http_message_acquire(struct aws_http_message *message);
+
+/**
+ * Release a hold on the object.
+ * The object is deleted when all holds on it are released.
+ *
+ * This function always returns NULL so that release-and-assign-NULL can be done with a single statement.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http_message_release(struct aws_http_message *message);
+
+/**
+ * Deprecated. This is equivalent to aws_http_message_release().
+ */
+AWS_HTTP_API
+void aws_http_message_destroy(struct aws_http_message *message);
+
+AWS_HTTP_API
+bool aws_http_message_is_request(const struct aws_http_message *message);
+
+AWS_HTTP_API
+bool aws_http_message_is_response(const struct aws_http_message *message);
+
+/**
+ * Get the protocol version of the http message.
+ */
+AWS_HTTP_API
+enum aws_http_version aws_http_message_get_protocol_version(const struct aws_http_message *message);
+
+/**
+ * Get the method (request messages only).
+ */
+AWS_HTTP_API
+int aws_http_message_get_request_method(
+ const struct aws_http_message *request_message,
+ struct aws_byte_cursor *out_method);
+
+/**
+ * Set the method (request messages only).
+ * The request makes its own copy of the underlying string.
+ */
+AWS_HTTP_API
+int aws_http_message_set_request_method(struct aws_http_message *request_message, struct aws_byte_cursor method);
+
+/**
+ * Get the path-and-query value (request messages only).
+ */
+AWS_HTTP_API
+int aws_http_message_get_request_path(const struct aws_http_message *request_message, struct aws_byte_cursor *out_path);
+
+/**
+ * Set the path-and-query value (request messages only).
+ * The request makes its own copy of the underlying string.
+ */
+AWS_HTTP_API
+int aws_http_message_set_request_path(struct aws_http_message *request_message, struct aws_byte_cursor path);
+
+/**
+ * Get the status code (response messages only).
+ * If no status is set, AWS_ERROR_HTTP_DATA_NOT_AVAILABLE is raised.
+ */
+AWS_HTTP_API
+int aws_http_message_get_response_status(const struct aws_http_message *response_message, int *out_status_code);
+
+/**
+ * Set the status code (response messages only).
+ */
+AWS_HTTP_API
+int aws_http_message_set_response_status(struct aws_http_message *response_message, int status_code);
+
+/**
+ * Get the body stream.
+ * Returns NULL if no body stream is set.
+ */
+AWS_HTTP_API
+struct aws_input_stream *aws_http_message_get_body_stream(const struct aws_http_message *message);
+
+/**
+ * Set the body stream.
+ * NULL is an acceptable value for messages with no body.
+ * Note: The message does NOT take ownership of the body stream.
+ * The stream must not be destroyed until the message is complete.
+ */
+AWS_HTTP_API
+void aws_http_message_set_body_stream(struct aws_http_message *message, struct aws_input_stream *body_stream);
+
+/**
+ * Submit a chunk of data to be sent on an HTTP/1.1 stream.
+ * The stream must have specified "chunked" in a "transfer-encoding" header.
+ * For client streams, activate() must be called before any chunks are submitted.
+ * For server streams, the response must be submitted before any chunks.
+ * A final chunk with size 0 must be submitted to successfully complete the HTTP-stream.
+ *
+ * Returns AWS_OP_SUCCESS if the chunk has been submitted. The chunk's completion
+ * callback will be invoked when the HTTP-stream is done with the chunk data,
+ * whether or not it was successfully sent (see `aws_http1_stream_write_chunk_complete_fn`).
+ * The chunk data must remain valid until the completion callback is invoked.
+ *
+ * Returns AWS_OP_ERR and raises an error if the chunk could not be submitted.
+ * In this case, the chunk's completion callback will never be invoked.
+ * Note that it is always possible for the HTTP-stream to terminate unexpectedly
+ * prior to this call being made, in which case the error raised is
+ * AWS_ERROR_HTTP_STREAM_HAS_COMPLETED.
+ */
+AWS_HTTP_API int aws_http1_stream_write_chunk(
+ struct aws_http_stream *http1_stream,
+ const struct aws_http1_chunk_options *options);
+
+/**
+ * The stream must have specified `http2_use_manual_data_writes` during request creation.
+ * For client streams, activate() must be called before any frames are submitted.
+ * For server streams, the response headers must be submitted before any frames.
+ * A write with options that has end_stream set to be true will end the stream and prevent any further write.
+ *
+ * @return AWS_OP_SUCCESS if the write was queued
+ *         AWS_OP_ERR indicating the attempt raised an error code.
+ * AWS_ERROR_INVALID_STATE will be raised for invalid usage.
+ * AWS_ERROR_HTTP_STREAM_HAS_COMPLETED will be raised if the stream ended for reasons behind the scenes.
+ *
+ * Typical usage will be something like:
+ * options.http2_use_manual_data_writes = true;
+ * stream = aws_http_connection_make_request(connection, &options);
+ * aws_http_stream_activate(stream);
+ * ...
+ * struct aws_http2_stream_write_data_options write;
+ * aws_http2_stream_write_data(stream, &write);
+ * ...
+ * struct aws_http2_stream_write_data_options last_write;
+ * last_write.end_stream = true;
+ * aws_http2_stream_write_data(stream, &last_write);
+ * ...
+ * aws_http_stream_release(stream);
+ */
+AWS_HTTP_API int aws_http2_stream_write_data(
+ struct aws_http_stream *http2_stream,
+ const struct aws_http2_stream_write_data_options *options);
+
+/**
+ * Add a list of headers to be sent as trailing headers after the last chunk. They should be paired with
+ * a "Trailer" header field which indicates the fields present in the trailer.
+ *
+ * Certain headers are forbidden in the trailer (e.g., Transfer-Encoding, Content-Length, Host). See RFC-7230
+ * Section 4.1.2 for more details.
+ *
+ * For client streams, activate() must be called before any chunks are submitted.
+ *
+ * For server streams, the response must be submitted before the trailer can be added
+ *
+ * aws_http1_stream_add_chunked_trailer must be called before the final size 0 chunk, and at the moment can only
+ * be called once, though this could change if need be.
+ *
+ * Returns AWS_OP_SUCCESS if the chunk has been submitted.
+ */
+AWS_HTTP_API int aws_http1_stream_add_chunked_trailer(
+ struct aws_http_stream *http1_stream,
+ const struct aws_http_headers *trailing_headers);
+
+/**
+ * Get the message's aws_http_headers.
+ * This datastructure has more functions for inspecting and modifying headers than
+ * are available on the aws_http_message datastructure.
+ */
+AWS_HTTP_API
+struct aws_http_headers *aws_http_message_get_headers(const struct aws_http_message *message);
+
+/**
+ * Get the message's const aws_http_headers.
+ */
+AWS_HTTP_API
+const struct aws_http_headers *aws_http_message_get_const_headers(const struct aws_http_message *message);
+
+/**
+ * Get the number of headers.
+ */
+AWS_HTTP_API
+size_t aws_http_message_get_header_count(const struct aws_http_message *message);
+
+/**
+ * Get the header at the specified index.
+ * This function cannot fail if a valid index is provided.
+ * Otherwise, AWS_ERROR_INVALID_INDEX will be raised.
+ *
+ * The underlying strings are stored within the message.
+ */
+AWS_HTTP_API
+int aws_http_message_get_header(
+ const struct aws_http_message *message,
+ struct aws_http_header *out_header,
+ size_t index);
+
+/**
+ * Add a header to the end of the array.
+ * The message makes its own copy of the underlying strings.
+ */
+AWS_HTTP_API
+int aws_http_message_add_header(struct aws_http_message *message, struct aws_http_header header);
+
+/**
+ * Add an array of headers to the end of the header array.
+ * The message makes its own copy of the underlying strings.
+ *
+ * This is a helper function useful when it's easier to define headers as a stack array, rather than calling add_header
+ * repeatedly.
+ */
+AWS_HTTP_API
+int aws_http_message_add_header_array(
+ struct aws_http_message *message,
+ const struct aws_http_header *headers,
+ size_t num_headers);
+
+/**
+ * Remove the header at the specified index.
+ * Headers after this index are all shifted back one position.
+ *
+ * This function cannot fail if a valid index is provided.
+ * Otherwise, AWS_ERROR_INVALID_INDEX will be raised.
+ */
+AWS_HTTP_API
+int aws_http_message_erase_header(struct aws_http_message *message, size_t index);
+
+/**
+ * Create a stream, with a client connection sending a request.
+ * The request does not start sending automatically once the stream is created. You must call
+ * aws_http_stream_activate to begin execution of the request.
+ *
+ * The `options` are copied during this call.
+ *
+ * Tip for language bindings: Do not bind the `options` struct. Use something more natural for your language,
+ * such as Builder Pattern in Java, or Python's ability to take many optional arguments by name.
+ *
+ * Note: The header of the request will be sent as it is when the message to send protocol matches the protocol of the
+ * connection.
+ * - No `user-agent` will be added.
+ * - No security check will be enforced. eg: `referer` header privacy should be enforced by the user-agent who adds the
+ * header
+ * - When HTTP/1 message sent on HTTP/2 connection, `aws_http2_message_new_from_http1` will be applied under the hood.
+ * - When HTTP/2 message sent on HTTP/1 connection, no change will be made.
+ */
+AWS_HTTP_API
+struct aws_http_stream *aws_http_connection_make_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+
+/**
+ * Create a stream, with a server connection receiving and responding to a request.
+ * This function can only be called from the `aws_http_on_incoming_request_fn` callback.
+ * aws_http_stream_send_response() should be used to send a response.
+ */
+AWS_HTTP_API
+struct aws_http_stream *aws_http_stream_new_server_request_handler(
+ const struct aws_http_request_handler_options *options);
+
+/**
+ * Users must release the stream when they are done with it, or its memory will never be cleaned up.
+ * This will not cancel the stream, its callbacks will still fire if the stream is still in progress.
+ *
+ * Tips for language bindings:
+ * - Invoke this from the wrapper class's finalizer/destructor.
+ * - Do not let the wrapper class be destroyed until on_complete() has fired.
+ */
+AWS_HTTP_API
+void aws_http_stream_release(struct aws_http_stream *stream);
+
+/**
+ * Only used for client initiated streams (immediately following a call to aws_http_connection_make_request).
+ *
+ * Activates the request's outgoing stream processing.
+ */
+AWS_HTTP_API int aws_http_stream_activate(struct aws_http_stream *stream);
+
+AWS_HTTP_API
+struct aws_http_connection *aws_http_stream_get_connection(const struct aws_http_stream *stream);
+
+/* Only valid in "request" streams, once response headers start arriving */
+AWS_HTTP_API
+int aws_http_stream_get_incoming_response_status(const struct aws_http_stream *stream, int *out_status);
+
+/* Only valid in "request handler" streams, once request headers start arriving */
+AWS_HTTP_API
+int aws_http_stream_get_incoming_request_method(
+ const struct aws_http_stream *stream,
+ struct aws_byte_cursor *out_method);
+
+AWS_HTTP_API
+int aws_http_stream_get_incoming_request_uri(const struct aws_http_stream *stream, struct aws_byte_cursor *out_uri);
+
+/**
+ * Send response (only callable from "request handler" streams)
+ * The response object must stay alive at least until the stream's on_complete is called.
+ */
+AWS_HTTP_API
+int aws_http_stream_send_response(struct aws_http_stream *stream, struct aws_http_message *response);
+
+/**
+ * Increment the stream's flow-control window to keep data flowing.
+ *
+ * If the connection was created with `manual_window_management` set true,
+ * the flow-control window of each stream will shrink as body data is received
+ * (headers, padding, and other metadata do not affect the window).
+ * The connection's `initial_window_size` determines the starting size of each stream's window.
+ * If a stream's flow-control window reaches 0, no further data will be received.
+ *
+ * If `manual_window_management` is false, this call will have no effect.
+ * The connection maintains its flow-control windows such that
+ * no back-pressure is applied and data arrives as fast as possible.
+ */
+AWS_HTTP_API
+void aws_http_stream_update_window(struct aws_http_stream *stream, size_t increment_size);
+
+/**
+ * Gets the HTTP/2 id associated with a stream. Even h1 streams have an id (using the same allocation procedure
+ * as http/2) for easier tracking purposes. For client streams, this will only be non-zero after a successful call
+ * to aws_http_stream_activate()
+ */
+AWS_HTTP_API
+uint32_t aws_http_stream_get_id(const struct aws_http_stream *stream);
+
+/**
+ * Reset the HTTP/2 stream (HTTP/2 only).
+ * Note that if the stream closes before this async call is fully processed, the RST_STREAM frame will not be sent.
+ *
+ * @param http2_stream HTTP/2 stream.
+ * @param http2_error aws_http2_error_code. Reason to reset the stream.
+ */
+AWS_HTTP_API
+int aws_http2_stream_reset(struct aws_http_stream *http2_stream, uint32_t http2_error);
+
+/**
+ * Get the error code received in rst_stream.
+ * Only valid if the stream has completed, and an RST_STREAM frame has been received.
+ *
+ * @param http2_stream HTTP/2 stream.
+ * @param out_http2_error Gets set to the HTTP/2 error code received in RST_STREAM.
+ */
+AWS_HTTP_API
+int aws_http2_stream_get_received_reset_error_code(struct aws_http_stream *http2_stream, uint32_t *out_http2_error);
+
+/**
+ * Get the HTTP/2 error code sent in the RST_STREAM frame (HTTP/2 only).
+ * Only valid if the stream has completed, and has sent an RST_STREAM frame.
+ *
+ * @param http2_stream HTTP/2 stream.
+ * @param out_http2_error Gets set to the HTTP/2 error code sent in RST_STREAM.
+ */
+AWS_HTTP_API
+int aws_http2_stream_get_sent_reset_error_code(struct aws_http_stream *http2_stream, uint32_t *out_http2_error);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_REQUEST_RESPONSE_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/server.h b/contrib/restricted/aws/aws-c-http/include/aws/http/server.h
new file mode 100644
index 00000000000..0e1be3d8c01
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/server.h
@@ -0,0 +1,198 @@
+#ifndef AWS_HTTP_SERVER_H
+#define AWS_HTTP_SERVER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+struct aws_http_connection;
+struct aws_server_bootstrap;
+struct aws_socket_options;
+struct aws_tls_connection_options;
+/**
+ * A listening socket which accepts incoming HTTP connections,
+ * creating a server-side aws_http_connection to handle each one.
+ */
+struct aws_http_server;
+struct aws_http_stream;
+
+typedef void(aws_http_server_on_incoming_connection_fn)(
+ struct aws_http_server *server,
+ struct aws_http_connection *connection,
+ int error_code,
+ void *user_data);
+
+typedef void(aws_http_server_on_destroy_fn)(void *user_data);
+
+/**
+ * Options for creating an HTTP server.
+ * Initialize with AWS_HTTP_SERVER_OPTIONS_INIT to set default values.
+ */
+struct aws_http_server_options {
+ /**
+ * The sizeof() this struct, used for versioning.
+ * Set by AWS_HTTP_SERVER_OPTIONS_INIT.
+ */
+ size_t self_size;
+
+ /**
+ * Required.
+ * Must outlive server.
+ */
+ struct aws_allocator *allocator;
+
+ /**
+ * Required.
+ * Must outlive server.
+ */
+ struct aws_server_bootstrap *bootstrap;
+
+ /**
+ * Required.
+ * Server makes copy.
+ */
+ struct aws_socket_endpoint *endpoint;
+
+ /**
+ * Required.
+ * Server makes a copy.
+ */
+ struct aws_socket_options *socket_options;
+
+ /**
+ * Optional.
+ * Server copies all contents except the `aws_tls_ctx`, which must outlive the server.
+ */
+ struct aws_tls_connection_options *tls_options;
+
+ /**
+ * Initial window size for incoming connections.
+ * Optional.
+ * A default size is set by AWS_HTTP_SERVER_OPTIONS_INIT.
+ */
+ size_t initial_window_size;
+
+ /**
+ * User data passed to callbacks.
+ * Optional.
+ */
+ void *server_user_data;
+
+ /**
+ * Invoked when an incoming connection has been set up, or when setup has failed.
+ * Required.
+ * If setup succeeds, the user must call aws_http_connection_configure_server().
+ */
+ aws_http_server_on_incoming_connection_fn *on_incoming_connection;
+
+ /**
+ * Invoked when the server finishes the destroy operation.
+ * Optional.
+ */
+ aws_http_server_on_destroy_fn *on_destroy_complete;
+
+ /**
+ * Set to true to manually manage the read window size.
+ *
+ * If this is false, the connection will maintain a constant window size.
+ *
+ * If this is true, the caller must manually increment the window size using aws_http_stream_update_window().
+ * If the window is not incremented, it will shrink by the amount of body data received. If the window size
+ * reaches 0, no further data will be received.
+ **/
+ bool manual_window_management;
+};
+
+/**
+ * Initializes aws_http_server_options with default values.
+ */
+#define AWS_HTTP_SERVER_OPTIONS_INIT \
+ { .self_size = sizeof(struct aws_http_server_options), .initial_window_size = SIZE_MAX, }
+
+/**
+ * Invoked at the start of an incoming request.
+ * To process the request, the user must create a request handler stream and return it to the connection.
+ * If NULL is returned, the request will not be processed and the last error will be reported as the reason for failure.
+ */
+typedef struct aws_http_stream *(
+ aws_http_on_incoming_request_fn)(struct aws_http_connection *connection, void *user_data);
+
+typedef void(aws_http_on_server_connection_shutdown_fn)(
+ struct aws_http_connection *connection,
+ int error_code,
+ void *connection_user_data);
+
+/**
+ * Options for configuring a server-side aws_http_connection.
+ * Initialize with AWS_HTTP_SERVER_CONNECTION_OPTIONS_INIT to set default values.
+ */
+struct aws_http_server_connection_options {
+ /**
+ * The sizeof() this struct, used for versioning.
+ * Set by AWS_HTTP_SERVER_CONNECTION_OPTIONS_INIT.
+ */
+ size_t self_size;
+
+ /**
+ * User data specific to this connection.
+ * Optional.
+ */
+ void *connection_user_data;
+
+ /**
+ * Invoked at the start of an incoming request.
+ * Required.
+ * The user must create a request handler stream and return it to the connection.
+ * See `aws_http_on_incoming_request_fn`.
+ */
+ aws_http_on_incoming_request_fn *on_incoming_request;
+
+ /**
+ * Invoked when the connection is shut down.
+ * Optional.
+ */
+ aws_http_on_server_connection_shutdown_fn *on_shutdown;
+};
+
+/**
+ * Initializes aws_http_server_connection_options with default values.
+ */
+#define AWS_HTTP_SERVER_CONNECTION_OPTIONS_INIT \
+ { .self_size = sizeof(struct aws_http_server_connection_options), }
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Create server, a listening socket that accepts incoming connections.
+ */
+AWS_HTTP_API
+struct aws_http_server *aws_http_server_new(const struct aws_http_server_options *options);
+
+/**
+ * Release the server. It will close the listening socket and all the connections existing in the server.
+ * The on_destroy_complete will be invoked when the destroy operation completes.
+ */
+AWS_HTTP_API
+void aws_http_server_release(struct aws_http_server *server);
+
+/**
+ * Configure a server connection.
+ * This must be called from the server's on_incoming_connection callback.
+ */
+AWS_HTTP_API
+int aws_http_connection_configure_server(
+ struct aws_http_connection *connection,
+ const struct aws_http_server_connection_options *options);
+
+/**
+ * Returns true if this is a server connection.
+ */
+AWS_HTTP_API
+bool aws_http_connection_is_server(const struct aws_http_connection *connection);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_SERVER_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/statistics.h b/contrib/restricted/aws/aws-c-http/include/aws/http/statistics.h
new file mode 100644
index 00000000000..ecc8c2700ab
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/statistics.h
@@ -0,0 +1,75 @@
+#ifndef AWS_HTTP_STATISTICS_H
+#define AWS_HTTP_STATISTICS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+#include <aws/common/statistics.h>
+
+enum aws_crt_http_statistics_category {
+ AWSCRT_STAT_CAT_HTTP1_CHANNEL = AWS_CRT_STATISTICS_CATEGORY_BEGIN_RANGE(AWS_C_HTTP_PACKAGE_ID),
+ AWSCRT_STAT_CAT_HTTP2_CHANNEL,
+};
+
+/**
+ * A statistics struct for http handlers. Tracks the actual amount of time that incoming and outgoing requests are
+ * waiting for their IO to complete.
+ */
+struct aws_crt_statistics_http1_channel {
+ aws_crt_statistics_category_t category;
+
+ uint64_t pending_outgoing_stream_ms;
+ uint64_t pending_incoming_stream_ms;
+
+ uint32_t current_outgoing_stream_id;
+ uint32_t current_incoming_stream_id;
+};
+
+struct aws_crt_statistics_http2_channel {
+ aws_crt_statistics_category_t category;
+
+ uint64_t pending_outgoing_stream_ms;
+ uint64_t pending_incoming_stream_ms;
+
+ /* True if during the time of report, there has ever been no active streams on the connection */
+ bool was_inactive;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initializes a http channel handler statistics struct
+ */
+AWS_HTTP_API
+int aws_crt_statistics_http1_channel_init(struct aws_crt_statistics_http1_channel *stats);
+
+/**
+ * Cleans up a http channel handler statistics struct
+ */
+AWS_HTTP_API
+void aws_crt_statistics_http1_channel_cleanup(struct aws_crt_statistics_http1_channel *stats);
+
+/**
+ * Resets a http channel handler statistics struct's statistics
+ */
+AWS_HTTP_API
+void aws_crt_statistics_http1_channel_reset(struct aws_crt_statistics_http1_channel *stats);
+
+/**
+ * Initializes a HTTP/2 channel handler statistics struct
+ */
+AWS_HTTP_API
+void aws_crt_statistics_http2_channel_init(struct aws_crt_statistics_http2_channel *stats);
+/**
+ * Resets a HTTP/2 channel handler statistics struct's statistics
+ */
+AWS_HTTP_API
+void aws_crt_statistics_http2_channel_reset(struct aws_crt_statistics_http2_channel *stats);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_STATISTICS_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/status_code.h b/contrib/restricted/aws/aws-c-http/include/aws/http/status_code.h
new file mode 100644
index 00000000000..292f8662399
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/status_code.h
@@ -0,0 +1,82 @@
+#ifndef AWS_HTTP_STATUS_CODE_H
+#define AWS_HTTP_STATUS_CODE_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/*
+ * Define most of the http response codes we probably will use.
+ * https://www.iana.org/assignments/http-status-codes/http-status-codes.txt
+ * This is NOT a definitive list of codes.
+ */
+enum aws_http_status_code {
+ /*
+ * This is a special response code defined for convenience in error processing,
+ * indicating processing of http request met error and didn't reach server.
+ */
+ AWS_HTTP_STATUS_CODE_UNKNOWN = -1,
+ AWS_HTTP_STATUS_CODE_100_CONTINUE = 100,
+ AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS = 101,
+ AWS_HTTP_STATUS_CODE_102_PROCESSING = 102,
+ AWS_HTTP_STATUS_CODE_103_EARLY_HINTS = 103,
+ AWS_HTTP_STATUS_CODE_200_OK = 200,
+ AWS_HTTP_STATUS_CODE_201_CREATED = 201,
+ AWS_HTTP_STATUS_CODE_202_ACCEPTED = 202,
+ AWS_HTTP_STATUS_CODE_203_NON_AUTHORITATIVE_INFORMATION = 203,
+ AWS_HTTP_STATUS_CODE_204_NO_CONTENT = 204,
+ AWS_HTTP_STATUS_CODE_205_RESET_CONTENT = 205,
+ AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT = 206,
+ AWS_HTTP_STATUS_CODE_207_MULTI_STATUS = 207,
+ AWS_HTTP_STATUS_CODE_208_ALREADY_REPORTED = 208,
+ AWS_HTTP_STATUS_CODE_226_IM_USED = 226,
+ AWS_HTTP_STATUS_CODE_300_MULTIPLE_CHOICES = 300,
+ AWS_HTTP_STATUS_CODE_301_MOVED_PERMANENTLY = 301,
+ AWS_HTTP_STATUS_CODE_302_FOUND = 302,
+ AWS_HTTP_STATUS_CODE_303_SEE_OTHER = 303,
+ AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED = 304,
+ AWS_HTTP_STATUS_CODE_305_USE_PROXY = 305,
+ AWS_HTTP_STATUS_CODE_307_TEMPORARY_REDIRECT = 307,
+ AWS_HTTP_STATUS_CODE_308_PERMANENT_REDIRECT = 308,
+ AWS_HTTP_STATUS_CODE_400_BAD_REQUEST = 400,
+ AWS_HTTP_STATUS_CODE_401_UNAUTHORIZED = 401,
+ AWS_HTTP_STATUS_CODE_402_PAYMENT_REQUIRED = 402,
+ AWS_HTTP_STATUS_CODE_403_FORBIDDEN = 403,
+ AWS_HTTP_STATUS_CODE_404_NOT_FOUND = 404,
+ AWS_HTTP_STATUS_CODE_405_METHOD_NOT_ALLOWED = 405,
+ AWS_HTTP_STATUS_CODE_406_NOT_ACCEPTABLE = 406,
+ AWS_HTTP_STATUS_CODE_407_PROXY_AUTHENTICATION_REQUIRED = 407,
+ AWS_HTTP_STATUS_CODE_408_REQUEST_TIMEOUT = 408,
+ AWS_HTTP_STATUS_CODE_409_CONFLICT = 409,
+ AWS_HTTP_STATUS_CODE_410_GONE = 410,
+ AWS_HTTP_STATUS_CODE_411_LENGTH_REQUIRED = 411,
+ AWS_HTTP_STATUS_CODE_412_PRECONDITION_FAILED = 412,
+ AWS_HTTP_STATUS_CODE_413_REQUEST_ENTITY_TOO_LARGE = 413,
+ AWS_HTTP_STATUS_CODE_414_REQUEST_URI_TOO_LONG = 414,
+ AWS_HTTP_STATUS_CODE_415_UNSUPPORTED_MEDIA_TYPE = 415,
+ AWS_HTTP_STATUS_CODE_416_REQUESTED_RANGE_NOT_SATISFIABLE = 416,
+ AWS_HTTP_STATUS_CODE_417_EXPECTATION_FAILED = 417,
+ AWS_HTTP_STATUS_CODE_421_MISDIRECTED_REQUEST = 421,
+ AWS_HTTP_STATUS_CODE_422_UNPROCESSABLE_ENTITY = 422,
+ AWS_HTTP_STATUS_CODE_423_LOCKED = 423,
+ AWS_HTTP_STATUS_CODE_424_FAILED_DEPENDENCY = 424,
+ AWS_HTTP_STATUS_CODE_425_TOO_EARLY = 425,
+ AWS_HTTP_STATUS_CODE_426_UPGRADE_REQUIRED = 426,
+ AWS_HTTP_STATUS_CODE_428_PRECONDITION_REQUIRED = 428,
+ AWS_HTTP_STATUS_CODE_429_TOO_MANY_REQUESTS = 429,
+ AWS_HTTP_STATUS_CODE_431_REQUEST_HEADER_FIELDS_TOO_LARGE = 431,
+ AWS_HTTP_STATUS_CODE_451_UNAVAILABLE_FOR_LEGAL_REASON = 451,
+ AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR = 500,
+ AWS_HTTP_STATUS_CODE_501_NOT_IMPLEMENTED = 501,
+ AWS_HTTP_STATUS_CODE_502_BAD_GATEWAY = 502,
+ AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE = 503,
+ AWS_HTTP_STATUS_CODE_504_GATEWAY_TIMEOUT = 504,
+ AWS_HTTP_STATUS_CODE_505_HTTP_VERSION_NOT_SUPPORTED = 505,
+ AWS_HTTP_STATUS_CODE_506_VARIANT_ALSO_NEGOTIATES = 506,
+ AWS_HTTP_STATUS_CODE_507_INSUFFICIENT_STORAGE = 507,
+ AWS_HTTP_STATUS_CODE_508_LOOP_DETECTED = 508,
+ AWS_HTTP_STATUS_CODE_510_NOT_EXTENDED = 510,
+ AWS_HTTP_STATUS_CODE_511_NETWORK_AUTHENTICATION_REQUIRED = 511,
+};
+#endif /* AWS_HTTP_STATUS_CODE_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/websocket.h b/contrib/restricted/aws/aws-c-http/include/aws/http/websocket.h
new file mode 100644
index 00000000000..6f85cafa810
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/websocket.h
@@ -0,0 +1,483 @@
+#ifndef AWS_HTTP_WEBSOCKET_H
+#define AWS_HTTP_WEBSOCKET_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+struct aws_http_header;
+struct aws_http_message;
+
+/* TODO: Document lifetime stuff */
+/* TODO: Document CLOSE frame behavior (when auto-sent during close, when auto-closed) */
+/* TODO: Accept payload as aws_input_stream */
+
+/**
+ * A websocket connection.
+ */
+struct aws_websocket;
+
+/**
+ * Opcode describing the type of a websocket frame.
+ * RFC-6455 Section 5.2
+ */
+enum aws_websocket_opcode {
+ AWS_WEBSOCKET_OPCODE_CONTINUATION = 0x0,
+ AWS_WEBSOCKET_OPCODE_TEXT = 0x1,
+ AWS_WEBSOCKET_OPCODE_BINARY = 0x2,
+ AWS_WEBSOCKET_OPCODE_CLOSE = 0x8,
+ AWS_WEBSOCKET_OPCODE_PING = 0x9,
+ AWS_WEBSOCKET_OPCODE_PONG = 0xA,
+};
+
+#define AWS_WEBSOCKET_MAX_PAYLOAD_LENGTH 0x7FFFFFFFFFFFFFFF
+#define AWS_WEBSOCKET_MAX_HANDSHAKE_KEY_LENGTH 25
+#define AWS_WEBSOCKET_CLOSE_TIMEOUT 1000000000 // nanos -> 1 sec
+
+/**
+ * Data passed to the websocket on_connection_setup callback.
+ *
+ * An error_code of zero indicates that setup was completely successful.
+ * You own the websocket pointer now and must call aws_websocket_release() when you are done with it.
+ * You can inspect the response headers, if you're interested.
+ *
+ * A non-zero error_code indicates that setup failed.
+ * The websocket pointer will be NULL.
+ * If the server sent a response, you can inspect its status-code, headers, and body,
+ * but this data will be NULL if setup failed before a full response could be received.
+ * If you wish to persist data from the response make a deep copy.
+ * The response data becomes invalid once the callback completes.
+ */
+struct aws_websocket_on_connection_setup_data {
+ int error_code;
+ struct aws_websocket *websocket;
+ const int *handshake_response_status;
+ const struct aws_http_header *handshake_response_header_array;
+ size_t num_handshake_response_headers;
+ const struct aws_byte_cursor *handshake_response_body;
+};
+
+/**
+ * Called when websocket setup is complete.
+ * Called exactly once on the websocket's event-loop thread.
+ * See `aws_websocket_on_connection_setup_data`.
+ */
+typedef void(
+ aws_websocket_on_connection_setup_fn)(const struct aws_websocket_on_connection_setup_data *setup, void *user_data);
+
+/**
+ * Called when the websocket has finished shutting down.
+ * Called once on the websocket's event-loop thread if setup succeeded.
+ * If setup failed, this is never called.
+ */
+typedef void(aws_websocket_on_connection_shutdown_fn)(struct aws_websocket *websocket, int error_code, void *user_data);
+
+/**
+ * Data about an incoming frame.
+ * See RFC-6455 Section 5.2.
+ */
+struct aws_websocket_incoming_frame {
+ uint64_t payload_length;
+ uint8_t opcode;
+ bool fin;
+};
+
+/**
+ * Called when a new frame arrives.
+ * Invoked once per frame on the websocket's event-loop thread.
+ * Each incoming-frame-begin call will eventually be followed by an incoming-frame-complete call,
+ * before the next frame begins and before the websocket shuts down.
+ *
+ * Return true to proceed normally. If false is returned, the websocket will read no further data,
+ * the frame will complete with an error-code, and the connection will close.
+ */
+typedef bool(aws_websocket_on_incoming_frame_begin_fn)(
+ struct aws_websocket *websocket,
+ const struct aws_websocket_incoming_frame *frame,
+ void *user_data);
+
+/**
+ * Called repeatedly as payload data arrives.
+ * Invoked 0 or more times on the websocket's event-loop thread.
+ * Payload data will not be valid after this call, so copy if necessary.
+ * The payload data is always unmasked at this point.
+ *
+ * NOTE: If you created the websocket with `manual_window_management` set true, you must maintain the read window.
+ * Whenever the read window reaches 0, you will stop receiving anything.
+ * The websocket's `initial_window_size` determines the starting size of the read window.
+ * The read window shrinks as you receive the payload from "data" frames (TEXT, BINARY, and CONTINUATION).
+ * Use aws_websocket_increment_read_window() to increment the window again and keep frames flowing.
+ * Maintain a larger window to keep up high throughput.
+ * You only need to worry about the payload from "data" frames.
+ * The websocket automatically increments the window to account for any
+ * other incoming bytes, including other parts of a frame (opcode, payload-length, etc)
+ * and the payload of other frame types (PING, PONG, CLOSE).
+ *
+ * Return true to proceed normally. If false is returned, the websocket will read no further data,
+ * the frame will complete with an error-code, and the connection will close.
+ */
+typedef bool(aws_websocket_on_incoming_frame_payload_fn)(
+ struct aws_websocket *websocket,
+ const struct aws_websocket_incoming_frame *frame,
+ struct aws_byte_cursor data,
+ void *user_data);
+
+/**
+ * Called when done processing an incoming frame.
+ * If error_code is non-zero, an error occurred and the payload may not have been completely received.
+ * Invoked once per frame on the websocket's event-loop thread.
+ *
+ * Return true to proceed normally. If false is returned, the websocket will read no further data
+ * and the connection will close.
+ */
+typedef bool(aws_websocket_on_incoming_frame_complete_fn)(
+ struct aws_websocket *websocket,
+ const struct aws_websocket_incoming_frame *frame,
+ int error_code,
+ void *user_data);
+
+/**
+ * Options for creating a websocket client connection.
+ */
+struct aws_websocket_client_connection_options {
+ /**
+ * Required.
+ * Must outlive the connection.
+ */
+ struct aws_allocator *allocator;
+
+ /**
+ * Required.
+ * The connection keeps the bootstrap alive via ref-counting.
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /**
+ * Required.
+ * aws_websocket_client_connect() makes a copy.
+ */
+ const struct aws_socket_options *socket_options;
+
+ /**
+ * Optional.
+ * aws_websocket_client_connect() deep-copies all contents,
+ * and keeps the `aws_tls_ctx` alive via ref-counting.
+ */
+ const struct aws_tls_connection_options *tls_options;
+
+ /**
+ * Optional
+ * Configuration options related to http proxy usage.
+ */
+ const struct aws_http_proxy_options *proxy_options;
+
+ /**
+ * Required.
+ * aws_websocket_client_connect() makes a copy.
+ */
+ struct aws_byte_cursor host;
+
+ /**
+ * Optional.
+ * Defaults to 443 if tls_options is present, 80 if it is not.
+ */
+ uint16_t port;
+
+ /**
+ * Required.
+ * The request will be kept alive via ref-counting until the handshake completes.
+ * Suggestion: create via aws_http_message_new_websocket_handshake_request()
+ *
+ * The method MUST be set to GET.
+ * The following headers are required (replace values in []):
+ *
+ * Host: [server.example.com]
+ * Upgrade: websocket
+ * Connection: Upgrade
+ * Sec-WebSocket-Key: [dGhlIHNhbXBsZSBub25jZQ==]
+ * Sec-WebSocket-Version: 13
+ *
+ * Sec-Websocket-Key should be a random 16 bytes value, Base64 encoded.
+ */
+ struct aws_http_message *handshake_request;
+
+ /**
+ * Initial size of the websocket's read window.
+ * Ignored unless `manual_window_management` is true.
+ * Set to 0 to prevent any incoming websocket frames until aws_websocket_increment_read_window() is called.
+ */
+ size_t initial_window_size;
+
+ /**
+ * User data for callbacks.
+ * Optional.
+ */
+ void *user_data;
+
+ /**
+ * Called when connect completes.
+ * Required.
+ * If unsuccessful, error_code will be set, connection will be NULL,
+ * and the on_connection_shutdown callback will never be called.
+ * If successful, the user is now responsible for the websocket and must
+ * call aws_websocket_release() when they are done with it.
+ */
+ aws_websocket_on_connection_setup_fn *on_connection_setup;
+
+ /**
+ * Called when connection has finished shutting down.
+ * Optional.
+ * Never called if `on_connection_setup` reported failure.
+ * Note that the connection is not completely done until `on_connection_shutdown` has been called
+ * AND aws_websocket_release() has been called.
+ */
+ aws_websocket_on_connection_shutdown_fn *on_connection_shutdown;
+
+ /**
+ * Called when each new frame arrives.
+ * Optional.
+ * See `aws_websocket_on_incoming_frame_begin_fn`.
+ */
+ aws_websocket_on_incoming_frame_begin_fn *on_incoming_frame_begin;
+
+ /**
+ * Called repeatedly as payload data arrives.
+ * Optional.
+ * See `aws_websocket_on_incoming_frame_payload_fn`.
+ */
+ aws_websocket_on_incoming_frame_payload_fn *on_incoming_frame_payload;
+
+ /**
+ * Called when done processing an incoming frame.
+ * Optional.
+ * See `aws_websocket_on_incoming_frame_complete_fn`.
+ */
+ aws_websocket_on_incoming_frame_complete_fn *on_incoming_frame_complete;
+
+ /**
+ * Set to true to manually manage the read window size.
+ *
+ * If this is false, no backpressure is applied and frames will arrive as fast as possible.
+ *
+ * If this is true, then whenever the read window reaches 0 you will stop receiving anything.
+ * The websocket's `initial_window_size` determines the starting size of the read window.
+ * The read window shrinks as you receive the payload from "data" frames (TEXT, BINARY, and CONTINUATION).
+ * Use aws_websocket_increment_read_window() to increment the window again and keep frames flowing.
+ * Maintain a larger window to keep up high throughput.
+ * You only need to worry about the payload from "data" frames.
+ * The websocket automatically increments the window to account for any
+ * other incoming bytes, including other parts of a frame (opcode, payload-length, etc)
+ * and the payload of other frame types (PING, PONG, CLOSE).
+ */
+ bool manual_window_management;
+
+ /**
+ * Optional
+ * If set, requests that a specific event loop be used to seat the connection, rather than the next one
+ * in the event loop group. Useful for serializing all io and external events related to a client onto
+ * a single thread.
+ */
+ struct aws_event_loop *requested_event_loop;
+};
+
+/**
+ * Called repeatedly as the websocket's payload is streamed out.
+ * The user should write payload data to out_buf, up to available capacity.
+ * The websocket will mask this data for you, if necessary.
+ * Invoked repeatedly on the websocket's event-loop thread.
+ *
+ * Return true to proceed normally. If false is returned, the websocket will send no further data,
+ * the frame will complete with an error-code, and the connection will close.
+ */
+typedef bool(aws_websocket_stream_outgoing_payload_fn)(
+ struct aws_websocket *websocket,
+ struct aws_byte_buf *out_buf,
+ void *user_data);
+
+/**
+ * Called when a aws_websocket_send_frame() operation completes.
+ * error_code will be zero if the operation was successful.
+ * "Success" does not guarantee that the peer actually received or processed the frame.
+ * Invoked exactly once per sent frame on the websocket's event-loop thread.
+ */
+typedef void(
+ aws_websocket_outgoing_frame_complete_fn)(struct aws_websocket *websocket, int error_code, void *user_data);
+
+/**
+ * Options for sending a websocket frame.
+ * This structure is copied immediately by aws_websocket_send().
+ * For descriptions of opcode, fin, and payload_length see RFC-6455 Section 5.2.
+ */
+struct aws_websocket_send_frame_options {
+ /**
+ * Size of payload to be sent via `stream_outgoing_payload` callback.
+ */
+ uint64_t payload_length;
+
+ /**
+ * User data passed to callbacks.
+ */
+ void *user_data;
+
+ /**
+ * Callback for sending payload data.
+ * See `aws_websocket_stream_outgoing_payload_fn`.
+ * Required if `payload_length` is non-zero.
+ */
+ aws_websocket_stream_outgoing_payload_fn *stream_outgoing_payload;
+
+ /**
+ * Callback for completion of send operation.
+ * See `aws_websocket_outgoing_frame_complete_fn`.
+ * Optional.
+ */
+ aws_websocket_outgoing_frame_complete_fn *on_complete;
+
+ /**
+ * Frame type.
+ * `aws_websocket_opcode` enum provides standard values.
+ */
+ uint8_t opcode;
+
+ /**
+ * Indicates that this is the final fragment in a message. The first fragment MAY also be the final fragment.
+ */
+ bool fin;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Return true if opcode is for a data frame, false if opcode is for a control frame.
+ */
+AWS_HTTP_API
+bool aws_websocket_is_data_frame(uint8_t opcode);
+
+/**
+ * Asynchronously establish a client websocket connection.
+ * The on_connection_setup callback is invoked when the operation has finished creating a connection, or failed.
+ */
+AWS_HTTP_API
+int aws_websocket_client_connect(const struct aws_websocket_client_connection_options *options);
+
+/**
+ * Increment the websocket's ref-count, preventing it from being destroyed.
+ * @return Always returns the same pointer that is passed in.
+ */
+AWS_HTTP_API
+struct aws_websocket *aws_websocket_acquire(struct aws_websocket *websocket);
+
+/**
+ * Decrement the websocket's ref-count.
+ * When the ref-count reaches zero, the connection will shut down, if it hasn't already.
+ * Users must release the websocket when they are done with it.
+ * The websocket's memory cannot be reclaimed until this is done.
+ * Callbacks may continue firing after this is called, with "shutdown" being the final callback.
+ * This function may be called from any thread.
+ *
+ * It is safe to pass NULL, nothing will happen.
+ */
+AWS_HTTP_API
+void aws_websocket_release(struct aws_websocket *websocket);
+
+/**
+ * Close the websocket connection.
+ * It is safe to call this, even if the connection is already closed or closing.
+ * The websocket will attempt to send a CLOSE frame during normal shutdown.
+ * If `free_scarce_resources_immediately` is true, the connection will be torn down as quickly as possible.
+ * This function may be called from any thread.
+ */
+AWS_HTTP_API
+void aws_websocket_close(struct aws_websocket *websocket, bool free_scarce_resources_immediately);
+
+/**
+ * Send a websocket frame.
+ * The `options` struct is copied.
+ * A callback will be invoked when the operation completes.
+ * This function may be called from any thread.
+ */
+AWS_HTTP_API
+int aws_websocket_send_frame(struct aws_websocket *websocket, const struct aws_websocket_send_frame_options *options);
+
+/**
+ * Manually increment the read window to keep frames flowing.
+ *
+ * If the websocket was created with `manual_window_management` set true,
+ * then whenever the read window reaches 0 you will stop receiving data.
+ * The websocket's `initial_window_size` determines the starting size of the read window.
+ * The read window shrinks as you receive the payload from "data" frames (TEXT, BINARY, and CONTINUATION).
+ * Use aws_websocket_increment_read_window() to increment the window again and keep frames flowing.
+ * Maintain a larger window to keep up high throughput.
+ * You only need to worry about the payload from "data" frames.
+ * The websocket automatically increments the window to account for any
+ * other incoming bytes, including other parts of a frame (opcode, payload-length, etc)
+ * and the payload of other frame types (PING, PONG, CLOSE).
+ *
+ * If the websocket was created with `manual_window_management` set false, this function does nothing.
+ *
+ * This function may be called from any thread.
+ */
+AWS_HTTP_API
+void aws_websocket_increment_read_window(struct aws_websocket *websocket, size_t size);
+
+/**
+ * Convert the websocket into a mid-channel handler.
+ * The websocket will stop being usable via its public API and become just another handler in the channel.
+ * The caller will likely install a channel handler to the right.
+ * This must not be called in the middle of an incoming frame (between "frame begin" and "frame complete" callbacks).
+ * This MUST be called from the websocket's thread.
+ *
+ * If successful:
+ * - Other than aws_websocket_release(), all calls to aws_websocket_x() functions are ignored.
+ * - The websocket will no longer invoke any "incoming frame" callbacks.
+ * - aws_io_messages written by a downstream handler will be wrapped in binary data frames and sent upstream.
+ * The data may be split/combined as it is sent along.
+ * - aws_io_messages read from upstream handlers will be scanned for binary data frames.
+ * The payloads of these frames will be sent downstream.
+ * The payloads may be split/combined as they are sent along.
+ * - An incoming close frame will automatically result in channel-shutdown.
+ * - aws_websocket_release() must still be called or the websocket and its channel will never be cleaned up.
+ * - The websocket will still invoke its "on connection shutdown" callback when channel shutdown completes.
+ *
+ * If unsuccessful, AWS_OP_ERR is returned and the websocket is unchanged.
+ */
+AWS_HTTP_API
+int aws_websocket_convert_to_midchannel_handler(struct aws_websocket *websocket);
+
+/**
+ * Returns the websocket's underlying I/O channel.
+ */
+AWS_HTTP_API
+struct aws_channel *aws_websocket_get_channel(const struct aws_websocket *websocket);
+
+/**
+ * Generate value for a Sec-WebSocket-Key header and write it into `dst` buffer.
+ * The buffer should have at least AWS_WEBSOCKET_MAX_HANDSHAKE_KEY_LENGTH space available.
+ *
+ * This value is the base64 encoding of a random 16-byte value.
+ * RFC-6455 Section 4.1
+ */
+AWS_HTTP_API
+int aws_websocket_random_handshake_key(struct aws_byte_buf *dst);
+
+/**
+ * Create request with all required fields for a websocket upgrade request.
+ * The method and path are set, and the following headers are added:
+ *
+ * Host: <host>
+ * Upgrade: websocket
+ * Connection: Upgrade
+ * Sec-WebSocket-Key: <base64 encoding of 16 random bytes>
+ * Sec-WebSocket-Version: 13
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http_message_new_websocket_handshake_request(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor path,
+ struct aws_byte_cursor host);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_WEBSOCKET_H */
diff --git a/contrib/restricted/aws/aws-c-http/source/connection.c b/contrib/restricted/aws/aws-c-http/source/connection.c
new file mode 100644
index 00000000000..f020823dcf1
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/connection.c
@@ -0,0 +1,1200 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/connection_impl.h>
+#include <aws/http/private/connection_monitor.h>
+
+#include <aws/http/private/h1_connection.h>
+#include <aws/http/private/h2_connection.h>
+
+#include <aws/http/private/proxy_impl.h>
+
+#include <aws/common/hash_table.h>
+#include <aws/common/mutex.h>
+#include <aws/common/string.h>
+#include <aws/http/request_response.h>
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/logging.h>
+#include <aws/io/socket.h>
+#include <aws/io/tls_channel_handler.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+# pragma warning(disable : 4232) /* function pointer to dll symbol */
+#endif
+
+static struct aws_http_connection_system_vtable s_default_system_vtable = {
+ .new_socket_channel = aws_client_bootstrap_new_socket_channel,
+};
+
+static const struct aws_http_connection_system_vtable *s_system_vtable_ptr = &s_default_system_vtable;
+
+void aws_http_client_bootstrap_destroy(struct aws_http_client_bootstrap *bootstrap) {
+    /* During allocation, the underlying structures should be allocated with the bootstrap by aws_mem_acquire_many. Thus,
+ * we only need to clean up the first pointer which is the bootstrap */
+ if (bootstrap->alpn_string_map) {
+ aws_hash_table_clean_up(bootstrap->alpn_string_map);
+ }
+ aws_mem_release(bootstrap->alloc, bootstrap);
+}
+
+void aws_http_connection_set_system_vtable(const struct aws_http_connection_system_vtable *system_vtable) {
+ s_system_vtable_ptr = system_vtable;
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_alpn_protocol_http_1_1, "http/1.1");
+AWS_STATIC_STRING_FROM_LITERAL(s_alpn_protocol_http_2, "h2");
+
+struct aws_http_server {
+ struct aws_allocator *alloc;
+ struct aws_server_bootstrap *bootstrap;
+ bool is_using_tls;
+ bool manual_window_management;
+ size_t initial_window_size;
+ void *user_data;
+ aws_http_server_on_incoming_connection_fn *on_incoming_connection;
+ aws_http_server_on_destroy_fn *on_destroy_complete;
+ struct aws_socket *socket;
+
+ /* Any thread may touch this data, but the lock must be held */
+ struct {
+ struct aws_mutex lock;
+ bool is_shutting_down;
+ struct aws_hash_table channel_to_connection_map;
+ } synced_data;
+};
+
+static void s_server_lock_synced_data(struct aws_http_server *server) {
+ int err = aws_mutex_lock(&server->synced_data.lock);
+ AWS_ASSERT(!err);
+ (void)err;
+}
+
+static void s_server_unlock_synced_data(struct aws_http_server *server) {
+ int err = aws_mutex_unlock(&server->synced_data.lock);
+ AWS_ASSERT(!err);
+ (void)err;
+}
+
+/* Determine the http-version, create appropriate type of connection, and insert it into the channel. */
+struct aws_http_connection *aws_http_connection_new_channel_handler(
+ struct aws_allocator *alloc,
+ struct aws_channel *channel,
+ bool is_server,
+ bool is_using_tls,
+ bool manual_window_management,
+ bool prior_knowledge_http2,
+ size_t initial_window_size,
+ const struct aws_hash_table *alpn_string_map,
+ const struct aws_http1_connection_options *http1_options,
+ const struct aws_http2_connection_options *http2_options,
+ void *connection_user_data) {
+
+ struct aws_channel_slot *connection_slot = NULL;
+ struct aws_http_connection *connection = NULL;
+
+ /* Create slot for connection. */
+ connection_slot = aws_channel_slot_new(channel);
+ if (!connection_slot) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Failed to create slot in channel %p, error %d (%s).",
+ (void *)channel,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ int err = aws_channel_slot_insert_end(channel, connection_slot);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Failed to insert slot into channel %p, error %d (%s).",
+ (void *)channel,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Determine HTTP version */
+ enum aws_http_version version = AWS_HTTP_VERSION_1_1;
+
+ if (is_using_tls) {
+ /* Query TLS channel handler (immediately to left in the channel) for negotiated ALPN protocol */
+ if (!connection_slot->adj_left || !connection_slot->adj_left->handler) {
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION, "static: Failed to find TLS handler in channel %p.", (void *)channel);
+ goto error;
+ }
+
+ struct aws_channel_slot *tls_slot = connection_slot->adj_left;
+ struct aws_channel_handler *tls_handler = tls_slot->handler;
+ struct aws_byte_buf protocol = aws_tls_handler_protocol(tls_handler);
+ if (protocol.len) {
+ bool customized = false;
+ if (alpn_string_map) {
+ customized = true;
+ struct aws_string *negotiated_result = aws_string_new_from_buf(alloc, &protocol);
+ struct aws_hash_element *found = NULL;
+ aws_hash_table_find(alpn_string_map, (void *)negotiated_result, &found);
+ if (found) {
+ version = (enum aws_http_version)(size_t)found->value;
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Customized ALPN protocol " PRInSTR " used. " PRInSTR " client connection established.",
+ AWS_BYTE_BUF_PRI(protocol),
+ AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(version)));
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Customized ALPN protocol " PRInSTR
+ " used. However the it's not found in the ALPN map provided.",
+ AWS_BYTE_BUF_PRI(protocol));
+ version = AWS_HTTP_VERSION_UNKNOWN;
+ }
+ aws_string_destroy(negotiated_result);
+ }
+ if (customized) {
+ /* Do nothing */
+ } else if (aws_string_eq_byte_buf(s_alpn_protocol_http_1_1, &protocol)) {
+ version = AWS_HTTP_VERSION_1_1;
+ } else if (aws_string_eq_byte_buf(s_alpn_protocol_http_2, &protocol)) {
+ version = AWS_HTTP_VERSION_2;
+ } else {
+ AWS_LOGF_WARN(AWS_LS_HTTP_CONNECTION, "static: Unrecognized ALPN protocol. Assuming HTTP/1.1");
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION, "static: Unrecognized ALPN protocol " PRInSTR, AWS_BYTE_BUF_PRI(protocol));
+
+ version = AWS_HTTP_VERSION_1_1;
+ }
+ }
+ } else {
+ if (prior_knowledge_http2) {
+ AWS_LOGF_TRACE(AWS_LS_HTTP_CONNECTION, "Using prior knowledge to start HTTP/2 connection");
+ version = AWS_HTTP_VERSION_2;
+ }
+ }
+
+ /* Create connection/handler */
+ switch (version) {
+ case AWS_HTTP_VERSION_1_1:
+ if (is_server) {
+ connection = aws_http_connection_new_http1_1_server(
+ alloc, manual_window_management, initial_window_size, http1_options);
+ } else {
+ connection = aws_http_connection_new_http1_1_client(
+ alloc, manual_window_management, initial_window_size, http1_options);
+ }
+ break;
+ case AWS_HTTP_VERSION_2:
+ if (is_server) {
+ connection = aws_http_connection_new_http2_server(alloc, manual_window_management, http2_options);
+ } else {
+ connection = aws_http_connection_new_http2_client(alloc, manual_window_management, http2_options);
+ }
+ break;
+ default:
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Unsupported version " PRInSTR,
+ AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(version)));
+
+ aws_raise_error(AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL);
+ goto error;
+ }
+
+ if (!connection) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Failed to create " PRInSTR " %s connection object, error %d (%s).",
+ AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(version)),
+ is_server ? "server" : "client",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+ connection->user_data = connection_user_data;
+
+ /* Connect handler and slot */
+ if (aws_channel_slot_set_handler(connection_slot, &connection->channel_handler)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Failed to set HTTP handler into slot on channel %p, error %d (%s).",
+ (void *)channel,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ /* Success! Inform connection that installation is complete */
+ connection->vtable->on_channel_handler_installed(&connection->channel_handler, connection_slot);
+
+ return connection;
+
+error:
+ if (connection_slot) {
+ if (!connection_slot->handler && connection) {
+ aws_channel_handler_destroy(&connection->channel_handler);
+ }
+
+ aws_channel_slot_remove(connection_slot);
+ }
+
+ return NULL;
+}
+
+void aws_http_connection_close(struct aws_http_connection *connection) {
+ AWS_ASSERT(connection);
+ connection->vtable->close(connection);
+}
+
+void aws_http_connection_stop_new_requests(struct aws_http_connection *connection) {
+ AWS_ASSERT(connection);
+ connection->vtable->stop_new_requests(connection);
+}
+
+bool aws_http_connection_is_open(const struct aws_http_connection *connection) {
+ AWS_ASSERT(connection);
+ return connection->vtable->is_open(connection);
+}
+
+bool aws_http_connection_new_requests_allowed(const struct aws_http_connection *connection) {
+ AWS_ASSERT(connection);
+ return connection->vtable->new_requests_allowed(connection);
+}
+
+bool aws_http_connection_is_client(const struct aws_http_connection *connection) {
+ return connection->client_data;
+}
+
+bool aws_http_connection_is_server(const struct aws_http_connection *connection) {
+ return connection->server_data;
+}
+
+int aws_http2_connection_change_settings(
+ struct aws_http_connection *http2_connection,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ aws_http2_on_change_settings_complete_fn *on_completed,
+ void *user_data) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ return http2_connection->vtable->change_settings(
+ http2_connection, settings_array, num_settings, on_completed, user_data);
+}
+
+int aws_http2_connection_ping(
+ struct aws_http_connection *http2_connection,
+ const struct aws_byte_cursor *optional_opaque_data,
+ aws_http2_on_ping_complete_fn *on_ack,
+ void *user_data) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ return http2_connection->vtable->send_ping(http2_connection, optional_opaque_data, on_ack, user_data);
+}
+
+void aws_http2_connection_send_goaway(
+ struct aws_http_connection *http2_connection,
+ uint32_t http2_error,
+ bool allow_more_streams,
+ const struct aws_byte_cursor *optional_debug_data) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ http2_connection->vtable->send_goaway(http2_connection, http2_error, allow_more_streams, optional_debug_data);
+}
+
+int aws_http2_connection_get_sent_goaway(
+ struct aws_http_connection *http2_connection,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(out_http2_error);
+ AWS_PRECONDITION(out_last_stream_id);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ return http2_connection->vtable->get_sent_goaway(http2_connection, out_http2_error, out_last_stream_id);
+}
+
+int aws_http2_connection_get_received_goaway(
+ struct aws_http_connection *http2_connection,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(out_http2_error);
+ AWS_PRECONDITION(out_last_stream_id);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ return http2_connection->vtable->get_received_goaway(http2_connection, out_http2_error, out_last_stream_id);
+}
+
+void aws_http2_connection_get_local_settings(
+ const struct aws_http_connection *http2_connection,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ http2_connection->vtable->get_local_settings(http2_connection, out_settings);
+}
+
+void aws_http2_connection_get_remote_settings(
+ const struct aws_http_connection *http2_connection,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ http2_connection->vtable->get_remote_settings(http2_connection, out_settings);
+}
+
+void aws_http2_connection_update_window(struct aws_http_connection *http2_connection, uint32_t increment_size) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ http2_connection->vtable->update_window(http2_connection, increment_size);
+}
+
+struct aws_channel *aws_http_connection_get_channel(struct aws_http_connection *connection) {
+ AWS_ASSERT(connection);
+ return connection->channel_slot->channel;
+}
+
+int aws_http_alpn_map_init(struct aws_allocator *allocator, struct aws_hash_table *map) {
+ AWS_ASSERT(allocator);
+ AWS_ASSERT(map);
+ int result = aws_hash_table_init(
+ map,
+ allocator,
+ 5 /* initial size */,
+ aws_hash_string,
+ aws_hash_callback_string_eq,
+ aws_hash_callback_string_destroy,
+ NULL);
+ if (result) {
+ /* OOM will crash */
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "Failed to initialize ALPN map with error code %d (%s)",
+ error_code,
+ aws_error_name(error_code));
+ }
+ return result;
+}
+
+void aws_http_connection_acquire(struct aws_http_connection *connection) {
+ AWS_ASSERT(connection);
+ aws_atomic_fetch_add(&connection->refcount, 1);
+}
+
+/* Drop a reference on the connection. When the last reference is released, the channel
+ * is shut down and its hold released; destroying the channel destroys the connection. */
+void aws_http_connection_release(struct aws_http_connection *connection) {
+    if (connection == NULL) {
+        return;
+    }
+
+    size_t old_refcount = aws_atomic_fetch_sub(&connection->refcount, 1);
+    if (old_refcount != 1) {
+        /* Releasing with a refcount of 0 would be a double-release: fatal. */
+        AWS_FATAL_ASSERT(old_refcount != 0);
+        AWS_LOGF_TRACE(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Connection refcount released, %zu remaining.",
+            (void *)connection,
+            old_refcount - 1);
+        return;
+    }
+
+    AWS_LOGF_TRACE(
+        AWS_LS_HTTP_CONNECTION,
+        "id=%p: Final connection refcount released, shut down if necessary.",
+        (void *)connection);
+
+    /* Channel might already be shut down, but make sure */
+    aws_channel_shutdown(connection->channel_slot->channel, AWS_ERROR_SUCCESS);
+
+    /* When the channel's refcount reaches 0, it destroys its slots/handlers, which will destroy the connection */
+    aws_channel_release_hold(connection->channel_slot->channel);
+}
+
+/* At this point, the server bootstrapper has accepted an incoming connection from a client and set up a channel.
+ * Now we need to create an aws_http_connection and insert it into the channel as a channel-handler.
+ * Note: Be careful not to access server->socket until lock is acquired to avoid race conditions */
+static void s_server_bootstrap_on_accept_channel_setup(
+    struct aws_server_bootstrap *bootstrap,
+    int error_code,
+    struct aws_channel *channel,
+    void *user_data) {
+
+    (void)bootstrap;
+    AWS_ASSERT(user_data);
+    struct aws_http_server *server = user_data;
+    bool user_cb_invoked = false;
+    struct aws_http_connection *connection = NULL;
+    if (error_code) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_SERVER,
+            "%p: Incoming connection failed with error code %d (%s)",
+            (void *)server,
+            error_code,
+            aws_error_name(error_code));
+
+        goto error;
+    }
+    /* Create connection */
+    /* TODO: expose http1/2 options to server API */
+    struct aws_http1_connection_options http1_options;
+    AWS_ZERO_STRUCT(http1_options);
+    struct aws_http2_connection_options http2_options;
+    AWS_ZERO_STRUCT(http2_options);
+    connection = aws_http_connection_new_channel_handler(
+        server->alloc,
+        channel,
+        true,
+        server->is_using_tls,
+        server->manual_window_management,
+        false, /* prior_knowledge_http2 */
+        server->initial_window_size,
+        NULL, /* alpn_string_map */
+        &http1_options,
+        &http2_options,
+        NULL /* connection_user_data */);
+    if (!connection) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_SERVER,
+            "%p: Failed to create connection object, error %d (%s).",
+            (void *)server,
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+
+        goto error;
+    }
+
+    int put_err = 0;
+    /* BEGIN CRITICAL SECTION */
+    s_server_lock_synced_data(server);
+    if (server->synced_data.is_shutting_down) {
+        error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+    }
+    if (!error_code) {
+        put_err = aws_hash_table_put(&server->synced_data.channel_to_connection_map, channel, connection, NULL);
+    }
+    s_server_unlock_synced_data(server);
+    /* END CRITICAL SECTION */
+    if (error_code) {
+        /* BUGFIX: the first argument of AWS_LOGF_ERROR must be a log subject (AWS_LS_*);
+         * AWS_ERROR_HTTP_SERVER_CLOSED is an error code and routed this message to a bogus subject. */
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_SERVER,
+            "id=%p: Incoming connection failed. The server is shutting down.",
+            (void *)server);
+        goto error;
+    }
+
+    if (put_err) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_SERVER,
+            "%p: %s:%d: Failed to store connection object, error %d (%s).",
+            (void *)server,
+            server->socket->local_endpoint.address,
+            server->socket->local_endpoint.port,
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+
+        goto error;
+    }
+
+    /* Tell user of successful connection. */
+    AWS_LOGF_INFO(
+        AWS_LS_HTTP_CONNECTION,
+        "id=%p: " PRInSTR " server connection established at %p %s:%d.",
+        (void *)connection,
+        AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(connection->http_version)),
+        (void *)server,
+        server->socket->local_endpoint.address,
+        server->socket->local_endpoint.port);
+
+    server->on_incoming_connection(server, connection, AWS_ERROR_SUCCESS, server->user_data);
+    user_cb_invoked = true;
+
+    /* If user failed to configure the server during callback, shut down the channel. */
+    if (!connection->server_data->on_incoming_request) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Caller failed to invoke aws_http_connection_configure_server() during on_incoming_connection "
+            "callback, closing connection.",
+            (void *)connection);
+
+        aws_raise_error(AWS_ERROR_HTTP_REACTION_REQUIRED);
+        goto error;
+    }
+    return;
+
+error:
+
+    if (!error_code) {
+        error_code = aws_last_error();
+    }
+
+    /* Only report failure if the user hasn't already been told the connection succeeded. */
+    if (!user_cb_invoked) {
+        server->on_incoming_connection(server, NULL, error_code, server->user_data);
+    }
+
+    if (channel) {
+        aws_channel_shutdown(channel, error_code);
+    }
+
+    if (connection) {
+        /* release the ref count for the user side */
+        aws_http_connection_release(connection);
+    }
+}
+
+/* clean the server memory up */
+static void s_http_server_clean_up(struct aws_http_server *server) {
+    if (!server) {
+        return;
+    }
+
+    /* Drop our hold on the bootstrap first; the server no longer needs it. */
+    aws_server_bootstrap_release(server->bootstrap);
+
+    /* invoke the user callback */
+    if (server->on_destroy_complete) {
+        server->on_destroy_complete(server->user_data);
+    }
+    /* Tear down synced_data members before freeing the server struct itself. */
+    aws_hash_table_clean_up(&server->synced_data.channel_to_connection_map);
+    aws_mutex_clean_up(&server->synced_data.lock);
+    aws_mem_release(server->alloc, server);
+}
+
+/* At this point, the channel for a server connection has completed shutdown, but hasn't been destroyed yet. */
+static void s_server_bootstrap_on_accept_channel_shutdown(
+    struct aws_server_bootstrap *bootstrap,
+    int error_code,
+    struct aws_channel *channel,
+    void *user_data) {
+
+    (void)bootstrap;
+    AWS_ASSERT(user_data);
+    struct aws_http_server *server = user_data;
+
+    /* Figure out which connection this was, and remove that entry from the map.
+     * It won't be in the map if something went wrong while setting up the connection. */
+    struct aws_hash_element map_elem;
+    int was_present;
+
+    /* BEGIN CRITICAL SECTION */
+    s_server_lock_synced_data(server);
+    int remove_err =
+        aws_hash_table_remove(&server->synced_data.channel_to_connection_map, channel, &map_elem, &was_present);
+    s_server_unlock_synced_data(server);
+    /* END CRITICAL SECTION */
+
+    if (!remove_err && was_present) {
+        struct aws_http_connection *connection = map_elem.value;
+        AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION, "id=%p: Server connection shut down.", (void *)connection);
+        /* Tell user about shutdown */
+        if (connection->server_data->on_shutdown) {
+            connection->server_data->on_shutdown(connection, error_code, connection->user_data);
+        }
+    }
+}
+
+/* Listener destruction is complete and no connections remain, so the server
+ * memory itself can finally be reclaimed. */
+static void s_server_bootstrap_on_server_listener_destroy(struct aws_server_bootstrap *bootstrap, void *user_data) {
+    (void)bootstrap;
+    AWS_ASSERT(user_data);
+    s_http_server_clean_up((struct aws_http_server *)user_data);
+}
+
+/* Create a listening HTTP server. Returns NULL (with aws_last_error() set) on failure.
+ * Teardown on the error paths unwinds in reverse order of construction via the labeled gotos. */
+struct aws_http_server *aws_http_server_new(const struct aws_http_server_options *options) {
+    aws_http_fatal_assert_library_initialized();
+
+    struct aws_http_server *server = NULL;
+
+    if (!options || options->self_size == 0 || !options->allocator || !options->bootstrap || !options->socket_options ||
+        !options->on_incoming_connection || !options->endpoint) {
+
+        AWS_LOGF_ERROR(AWS_LS_HTTP_SERVER, "static: Invalid options, cannot create server.");
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        /* nothing to clean up */
+        return NULL;
+    }
+
+    server = aws_mem_calloc(options->allocator, 1, sizeof(struct aws_http_server));
+    if (!server) {
+        /* nothing to clean up */
+        return NULL;
+    }
+
+    server->alloc = options->allocator;
+    server->bootstrap = aws_server_bootstrap_acquire(options->bootstrap);
+    server->is_using_tls = options->tls_options != NULL;
+    server->initial_window_size = options->initial_window_size;
+    server->user_data = options->server_user_data;
+    server->on_incoming_connection = options->on_incoming_connection;
+    server->on_destroy_complete = options->on_destroy_complete;
+    server->manual_window_management = options->manual_window_management;
+
+    int err = aws_mutex_init(&server->synced_data.lock);
+    if (err) {
+        /* BUGFIX: aws_mutex_init() returns AWS_OP_ERR (-1), not an error code;
+         * report aws_last_error() so the log shows the real failure. */
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_SERVER,
+            "static: Failed to initialize mutex, error %d (%s).",
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+        goto mutex_error;
+    }
+    err = aws_hash_table_init(
+        &server->synced_data.channel_to_connection_map, server->alloc, 16, aws_hash_ptr, aws_ptr_eq, NULL, NULL);
+    if (err) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_SERVER,
+            "static: Cannot create server, error %d (%s).",
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+        goto hash_table_error;
+    }
+    /* Protect against callbacks firing before server->socket is set.
+     * (Note: is_using_tls was already derived from options->tls_options above;
+     * the redundant re-assignment that used to live here was removed.) */
+    s_server_lock_synced_data(server);
+
+    struct aws_server_socket_channel_bootstrap_options bootstrap_options = {
+        .enable_read_back_pressure = options->manual_window_management,
+        .tls_options = options->tls_options,
+        .bootstrap = options->bootstrap,
+        .socket_options = options->socket_options,
+        .incoming_callback = s_server_bootstrap_on_accept_channel_setup,
+        .shutdown_callback = s_server_bootstrap_on_accept_channel_shutdown,
+        .destroy_callback = s_server_bootstrap_on_server_listener_destroy,
+        .host_name = options->endpoint->address,
+        .port = options->endpoint->port,
+        .user_data = server,
+    };
+
+    server->socket = aws_server_bootstrap_new_socket_listener(&bootstrap_options);
+
+    s_server_unlock_synced_data(server);
+
+    if (!server->socket) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_SERVER,
+            "static: Failed creating new socket listener, error %d (%s). Cannot create server.",
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+
+        goto socket_error;
+    }
+
+    AWS_LOGF_INFO(
+        AWS_LS_HTTP_SERVER,
+        "%p %s:%d: Server setup complete, listening for incoming connections.",
+        (void *)server,
+        server->socket->local_endpoint.address,
+        server->socket->local_endpoint.port);
+
+    return server;
+
+socket_error:
+    aws_hash_table_clean_up(&server->synced_data.channel_to_connection_map);
+hash_table_error:
+    aws_mutex_clean_up(&server->synced_data.lock);
+mutex_error:
+    aws_mem_release(server->alloc, server);
+    return NULL;
+}
+
+/* Begin server shutdown: mark the server as shutting down, shut down every live channel,
+ * then destroy the listener. Final cleanup happens asynchronously from the event loop
+ * via s_server_bootstrap_on_server_listener_destroy. Idempotent: repeat calls are no-ops. */
+void aws_http_server_release(struct aws_http_server *server) {
+    if (!server) {
+        return;
+    }
+    bool already_shutting_down = false;
+    /* BEGIN CRITICAL SECTION */
+    s_server_lock_synced_data(server);
+    if (server->synced_data.is_shutting_down) {
+        already_shutting_down = true;
+    } else {
+        server->synced_data.is_shutting_down = true;
+    }
+    if (!already_shutting_down) {
+        /* shutdown all existing channels */
+        for (struct aws_hash_iter iter = aws_hash_iter_begin(&server->synced_data.channel_to_connection_map);
+             !aws_hash_iter_done(&iter);
+             aws_hash_iter_next(&iter)) {
+            struct aws_channel *channel = (struct aws_channel *)iter.element.key;
+            aws_channel_shutdown(channel, AWS_ERROR_HTTP_CONNECTION_CLOSED);
+        }
+    }
+    s_server_unlock_synced_data(server);
+    /* END CRITICAL SECTION */
+
+    if (already_shutting_down) {
+        /* The service is already shutting down, not shutting it down again */
+        AWS_LOGF_TRACE(AWS_LS_HTTP_SERVER, "id=%p: The server is already shutting down", (void *)server);
+        return;
+    }
+
+    /* stop listening, clean up the socket, after all existing connections finish shutting down, the
+     * s_server_bootstrap_on_server_listener_destroy will be invoked, clean up of the server will be there */
+    AWS_LOGF_INFO(
+        AWS_LS_HTTP_SERVER,
+        "%p %s:%d: Shutting down the server.",
+        (void *)server,
+        server->socket->local_endpoint.address,
+        server->socket->local_endpoint.port);
+
+    aws_server_bootstrap_destroy_socket_listener(server->bootstrap, server->socket);
+
+    /* wait for connections to finish shutting down
+     * clean up will be called from eventloop */
+}
+
+/* At this point, the channel bootstrapper has established a connection to the server and set up a channel.
+ * Now we need to create the aws_http_connection and insert it into the channel as a channel-handler. */
+static void s_client_bootstrap_on_channel_setup(
+    struct aws_client_bootstrap *channel_bootstrap,
+    int error_code,
+    struct aws_channel *channel,
+    void *user_data) {
+
+    (void)channel_bootstrap;
+    AWS_ASSERT(user_data);
+    struct aws_http_client_bootstrap *http_bootstrap = user_data;
+
+    /* Contract for setup callbacks is: channel is NULL if error_code is non-zero. */
+    AWS_FATAL_ASSERT((error_code != 0) == (channel == NULL));
+
+    if (error_code) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "static: Client connection failed with error %d (%s).",
+            error_code,
+            aws_error_name(error_code));
+
+        /* Immediately tell user of failed connection.
+         * No channel exists, so there will be no channel_shutdown callback. */
+        http_bootstrap->on_setup(NULL, error_code, http_bootstrap->user_data);
+
+        /* Clean up the http_bootstrap, it has no more work to do. */
+        aws_http_client_bootstrap_destroy(http_bootstrap);
+        return;
+    }
+
+    AWS_LOGF_TRACE(AWS_LS_HTTP_CONNECTION, "static: Socket connected, creating client connection object.");
+
+    http_bootstrap->connection = aws_http_connection_new_channel_handler(
+        http_bootstrap->alloc,
+        channel,
+        false,
+        http_bootstrap->is_using_tls,
+        http_bootstrap->stream_manual_window_management,
+        http_bootstrap->prior_knowledge_http2,
+        http_bootstrap->initial_window_size,
+        http_bootstrap->alpn_string_map,
+        &http_bootstrap->http1_options,
+        &http_bootstrap->http2_options,
+        http_bootstrap->user_data);
+    if (!http_bootstrap->connection) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "static: Failed to create the client connection object, error %d (%s).",
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+
+        goto error;
+    }
+
+    if (aws_http_connection_monitoring_options_is_valid(&http_bootstrap->monitoring_options)) {
+        /*
+         * On creation we validate monitoring options, if they exist, and fail if they're not
+         * valid. So at this point, is_valid() functions as an is-monitoring-on? check. A false
+         * value here is not an error, it's just not enabled.
+         */
+        struct aws_crt_statistics_handler *http_connection_monitor =
+            aws_crt_statistics_handler_new_http_connection_monitor(
+                http_bootstrap->alloc, &http_bootstrap->monitoring_options);
+        if (http_connection_monitor == NULL) {
+            goto error;
+        }
+
+        /* Channel takes ownership of the statistics handler. */
+        aws_channel_set_statistics_handler(channel, http_connection_monitor);
+    }
+
+    http_bootstrap->connection->proxy_request_transform = http_bootstrap->proxy_request_transform;
+
+    AWS_LOGF_INFO(
+        AWS_LS_HTTP_CONNECTION,
+        "id=%p: " PRInSTR " client connection established.",
+        (void *)http_bootstrap->connection,
+        AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(http_bootstrap->connection->http_version)));
+
+    /* Tell user of successful connection.
+     * Then clear the on_setup callback so that we know it's been called */
+    http_bootstrap->on_setup(http_bootstrap->connection, AWS_ERROR_SUCCESS, http_bootstrap->user_data);
+    http_bootstrap->on_setup = NULL;
+
+    return;
+
+error:
+    /* Something went wrong. Invoke channel shutdown. Then wait for channel shutdown to complete
+     * before informing the user that setup failed and cleaning up the http_bootstrap.*/
+    aws_channel_shutdown(channel, aws_last_error());
+}
+
+/* At this point, the channel for a client connection has completed its shutdown.
+ * Also reached when setup itself failed (then on_setup is still set and we report failure). */
+static void s_client_bootstrap_on_channel_shutdown(
+    struct aws_client_bootstrap *channel_bootstrap,
+    int error_code,
+    struct aws_channel *channel,
+    void *user_data) {
+
+    (void)channel_bootstrap;
+    (void)channel;
+
+    AWS_ASSERT(user_data);
+    struct aws_http_client_bootstrap *http_bootstrap = user_data;
+
+    /* If on_setup hasn't been called yet, inform user of failed setup.
+     * If on_setup was already called, inform user that it's shut down now. */
+    if (http_bootstrap->on_setup) {
+        /* make super duper sure that failed setup receives a non-zero error_code */
+        if (error_code == 0) {
+            error_code = AWS_ERROR_UNKNOWN;
+        }
+
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "static: Client setup failed with error %d (%s).",
+            error_code,
+            aws_error_name(error_code));
+
+        http_bootstrap->on_setup(NULL, error_code, http_bootstrap->user_data);
+
+    } else if (http_bootstrap->on_shutdown) {
+        AWS_LOGF_INFO(
+            AWS_LS_HTTP_CONNECTION,
+            "%p: Client shutdown completed with error %d (%s).",
+            (void *)http_bootstrap->connection,
+            error_code,
+            aws_error_name(error_code));
+
+        http_bootstrap->on_shutdown(http_bootstrap->connection, error_code, http_bootstrap->user_data);
+    }
+
+    /* Clean up bootstrapper */
+    aws_http_client_bootstrap_destroy(http_bootstrap);
+}
+
+/* Validate client-connection options. Returns AWS_OP_SUCCESS, or raises
+ * AWS_ERROR_INVALID_ARGUMENT and returns AWS_OP_ERR on the first invalid field.
+ * Marked static: s_-prefixed internal helper, only called from this file. */
+static int s_validate_http_client_connection_options(const struct aws_http_client_connection_options *options) {
+    if (options->self_size == 0) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, self size not initialized");
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    if (!options->allocator) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, no allocator supplied");
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    if (options->host_name.len == 0) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, empty host name.");
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    if (!options->socket_options) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, socket options are null.");
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    if (!options->on_setup) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, setup callback is null");
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    /* http2_options cannot be NULL here, calling function adds them if they were missing */
+    /* BUGFIX: the condition was inverted - it rejected the VALID case (count > 0 with a
+     * non-NULL array). Per the error message, the invalid case is count > 0 with a NULL array. */
+    if (options->http2_options->num_initial_settings > 0 && !options->http2_options->initial_settings_array) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "static: Invalid connection options, h2 settings count is non-zero but settings array is null");
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    if (options->monitoring_options && !aws_http_connection_monitoring_options_is_valid(options->monitoring_options)) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, invalid monitoring options");
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    if (options->prior_knowledge_http2 && options->tls_options) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: HTTP/2 prior knowledge only works with cleartext TCP.");
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Context threaded through the foreach callback when deep-copying an ALPN map. */
+struct s_copy_alpn_string_map_context {
+    struct aws_hash_table *map;       /* destination table being populated */
+    struct aws_allocator *allocator;  /* allocator for the copied key strings */
+};
+
+/* put every item into the source to make a deep copy of the map */
+static int s_copy_alpn_string_map(void *context, struct aws_hash_element *item) {
+ struct s_copy_alpn_string_map_context *func_context = context;
+ struct aws_hash_table *dest = func_context->map;
+ /* make a deep copy of the string and hash map will own the copy */
+ struct aws_string *key_copy = aws_string_new_from_string(func_context->allocator, item->key);
+ int was_created;
+ if (aws_hash_table_put(dest, key_copy, item->value, &was_created)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "Failed to copy ALPN map with error code %d (%s)",
+ error_code,
+ aws_error_name(error_code));
+ /* failed to put into the table, we need to clean up the copy ourselves */
+ aws_string_destroy(key_copy);
+ /* return error to stop iteration */
+ return AWS_COMMON_HASH_TABLE_ITER_ERROR;
+ }
+ if (!was_created) {
+ /* no new entry created, clean up the copy ourselves */
+ aws_string_destroy(key_copy);
+ }
+ return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
+}
+
+/* Initialize dest as a deep copy of the ALPN map src. A NULL or uninitialized
+ * src yields a zeroed-out dest and success. On failure dest is cleaned up. */
+int aws_http_alpn_map_init_copy(
+    struct aws_allocator *allocator,
+    struct aws_hash_table *dest,
+    struct aws_hash_table *src) {
+    /* Nothing to copy: leave dest zeroed so later clean-up is a harmless no-op. */
+    if (src == NULL || src->p_impl == NULL) {
+        AWS_ZERO_STRUCT(*dest);
+        return AWS_OP_SUCCESS;
+    }
+
+    if (aws_http_alpn_map_init(allocator, dest)) {
+        return AWS_OP_ERR;
+    }
+
+    struct s_copy_alpn_string_map_context context = {
+        .allocator = allocator,
+        .map = dest,
+    };
+
+    /* Walk src and deep-copy every entry into dest. */
+    if (aws_hash_table_foreach(src, s_copy_alpn_string_map, &context)) {
+        int error_code = aws_last_error();
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "Failed to copy ALPN map with error code %d (%s)",
+            error_code,
+            aws_error_name(error_code));
+        aws_hash_table_clean_up(dest);
+        return AWS_OP_ERR;
+    }
+    return AWS_OP_SUCCESS;
+}
+
+/* Core client-connect path shared by the direct and proxied entry points.
+ * Copies the caller's options (filling in defaults), allocates the bootstrap state
+ * in a single aws_mem_acquire_many block, and kicks off the socket channel.
+ * proxy_request_transform may be NULL when no proxy is involved. */
+int aws_http_client_connect_internal(
+    const struct aws_http_client_connection_options *orig_options,
+    aws_http_proxy_request_transform_fn *proxy_request_transform) {
+
+    if (!orig_options) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: http connection options are null.");
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+    struct aws_http_client_bootstrap *http_bootstrap = NULL;
+    struct aws_string *host_name = NULL;
+    int err = 0;
+
+    /* make copy of options, and add defaults for missing optional structs */
+    struct aws_http_client_connection_options options = *orig_options;
+    struct aws_http1_connection_options default_http1_options;
+    AWS_ZERO_STRUCT(default_http1_options);
+    if (options.http1_options == NULL) {
+        options.http1_options = &default_http1_options;
+    }
+
+    struct aws_http2_connection_options default_http2_options;
+    AWS_ZERO_STRUCT(default_http2_options);
+    if (options.http2_options == NULL) {
+        options.http2_options = &default_http2_options;
+    }
+
+    /* validate options */
+    if (s_validate_http_client_connection_options(&options)) {
+        goto error;
+    }
+
+    /* Proxy configurations must have been peeled off by aws_http_client_connect(). */
+    AWS_FATAL_ASSERT(options.proxy_options == NULL);
+
+    /* bootstrap_new() functions requires a null-terminated c-str */
+    host_name = aws_string_new_from_cursor(options.allocator, &options.host_name);
+    if (!host_name) {
+        goto error;
+    }
+
+    /* One allocation backs the bootstrap struct, the settings-array copy, and the ALPN map. */
+    struct aws_http2_setting *setting_array = NULL;
+    struct aws_hash_table *alpn_string_map = NULL;
+    aws_mem_acquire_many(
+        options.allocator,
+        3,
+        &http_bootstrap,
+        sizeof(struct aws_http_client_bootstrap),
+        &setting_array,
+        options.http2_options->num_initial_settings * sizeof(struct aws_http2_setting),
+        &alpn_string_map,
+        sizeof(struct aws_hash_table));
+
+    AWS_ZERO_STRUCT(*http_bootstrap);
+
+    http_bootstrap->alloc = options.allocator;
+    http_bootstrap->is_using_tls = options.tls_options != NULL;
+    http_bootstrap->stream_manual_window_management = options.manual_window_management;
+    http_bootstrap->prior_knowledge_http2 = options.prior_knowledge_http2;
+    http_bootstrap->initial_window_size = options.initial_window_size;
+    http_bootstrap->user_data = options.user_data;
+    http_bootstrap->on_setup = options.on_setup;
+    http_bootstrap->on_shutdown = options.on_shutdown;
+    http_bootstrap->proxy_request_transform = proxy_request_transform;
+    http_bootstrap->http1_options = *options.http1_options;
+    http_bootstrap->http2_options = *options.http2_options;
+
+    /* keep a copy of the settings array if it's not NULL */
+    if (options.http2_options->num_initial_settings > 0) {
+        memcpy(
+            setting_array,
+            options.http2_options->initial_settings_array,
+            options.http2_options->num_initial_settings * sizeof(struct aws_http2_setting));
+        http_bootstrap->http2_options.initial_settings_array = setting_array;
+    }
+
+    if (options.alpn_string_map) {
+        if (aws_http_alpn_map_init_copy(options.allocator, alpn_string_map, options.alpn_string_map)) {
+            goto error;
+        }
+        http_bootstrap->alpn_string_map = alpn_string_map;
+    }
+
+    if (options.monitoring_options) {
+        http_bootstrap->monitoring_options = *options.monitoring_options;
+    }
+
+    AWS_LOGF_TRACE(
+        AWS_LS_HTTP_CONNECTION,
+        "static: attempting to initialize a new client channel to %s:%d",
+        aws_string_c_str(host_name),
+        (int)options.port);
+
+    struct aws_socket_channel_bootstrap_options channel_options = {
+        .bootstrap = options.bootstrap,
+        .host_name = aws_string_c_str(host_name),
+        .port = options.port,
+        .socket_options = options.socket_options,
+        .tls_options = options.tls_options,
+        .setup_callback = s_client_bootstrap_on_channel_setup,
+        .shutdown_callback = s_client_bootstrap_on_channel_shutdown,
+        .enable_read_back_pressure = options.manual_window_management,
+        .user_data = http_bootstrap,
+        .requested_event_loop = options.requested_event_loop,
+    };
+
+    /* Routed through the system vtable so tests can intercept channel creation. */
+    err = s_system_vtable_ptr->new_socket_channel(&channel_options);
+
+    if (err) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "static: Failed to initiate socket channel for new client connection, error %d (%s).",
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+
+        goto error;
+    }
+
+    aws_string_destroy(host_name);
+    return AWS_OP_SUCCESS;
+
+error:
+    if (http_bootstrap) {
+        aws_http_client_bootstrap_destroy(http_bootstrap);
+    }
+
+    if (host_name) {
+        aws_string_destroy(host_name);
+    }
+
+    return AWS_OP_ERR;
+}
+
+/* Public entry point for client connections: dispatch to the proxy path when a
+ * proxy is configured (explicitly or via environment variables), otherwise
+ * connect directly. */
+int aws_http_client_connect(const struct aws_http_client_connection_options *options) {
+    aws_http_fatal_assert_library_initialized();
+
+    if (options->prior_knowledge_http2 && options->tls_options) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: HTTP/2 prior knowledge only works with cleartext TCP.");
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    /* An explicit proxy configuration always routes through the proxy code path. */
+    if (options->proxy_options != NULL) {
+        return aws_http_client_connect_via_proxy(options);
+    }
+
+    /* Proxy configuration picked up from environment variables, when enabled. */
+    if (options->proxy_ev_settings && options->proxy_ev_settings->env_var_type == AWS_HPEV_ENABLE) {
+        return aws_http_client_connect_via_proxy(options);
+    }
+
+    return aws_http_client_connect_internal(options, NULL);
+}
+
+/* Return the protocol version (e.g. HTTP/1.1 or HTTP/2) this connection is using. */
+enum aws_http_version aws_http_connection_get_version(const struct aws_http_connection *connection) {
+    return connection->http_version;
+}
+
+/* Attach server-side callbacks (incoming-request, shutdown) and user data to a
+ * server connection. Must be called exactly once, from the on_incoming_connection
+ * callback, and only on server connections. Raises AWS_ERROR_INVALID_ARGUMENT /
+ * AWS_ERROR_INVALID_STATE on misuse. */
+int aws_http_connection_configure_server(
+    struct aws_http_connection *connection,
+    const struct aws_http_server_connection_options *options) {
+
+    if (!connection || !options || !options->on_incoming_request) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "id=%p: Invalid server configuration options.", (void *)connection);
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    /* server_data is only set on server connections; reject client connections. */
+    if (!connection->server_data) {
+        AWS_LOGF_WARN(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Server-only function invoked on client, ignoring call.",
+            (void *)connection);
+        return aws_raise_error(AWS_ERROR_INVALID_STATE);
+    }
+    /* A non-NULL on_incoming_request doubles as the "already configured" flag. */
+    if (connection->server_data->on_incoming_request) {
+        AWS_LOGF_WARN(
+            AWS_LS_HTTP_CONNECTION, "id=%p: Connection is already configured, ignoring call.", (void *)connection);
+        return aws_raise_error(AWS_ERROR_INVALID_STATE);
+    }
+
+    connection->user_data = options->connection_user_data;
+    connection->server_data->on_incoming_request = options->on_incoming_request;
+    connection->server_data->on_shutdown = options->on_shutdown;
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Stream IDs are only 31 bits [5.1.1] */
+static const uint32_t MAX_STREAM_ID = UINT32_MAX >> 1;
+
+/* Hand out the next outgoing stream id, advancing by 2 to preserve parity
+ * (client-initiated streams are odd, server-initiated even per RFC 7540 5.1.1).
+ * Returns 0 and raises AWS_ERROR_HTTP_STREAM_IDS_EXHAUSTED once ids run out. */
+uint32_t aws_http_connection_get_next_stream_id(struct aws_http_connection *connection) {
+
+    uint32_t next_id = connection->next_stream_id;
+
+    if (AWS_UNLIKELY(next_id > MAX_STREAM_ID)) {
+        AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION, "id=%p: All available stream ids are gone", (void *)connection);
+
+        next_id = 0;
+        aws_raise_error(AWS_ERROR_HTTP_STREAM_IDS_EXHAUSTED);
+    } else {
+        connection->next_stream_id += 2;
+    }
+    return next_id;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/connection_manager.c b/contrib/restricted/aws/aws-c-http/source/connection_manager.c
new file mode 100644
index 00000000000..30eda61778f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/connection_manager.c
@@ -0,0 +1,1560 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/connection_manager.h>
+
+#include <aws/http/connection.h>
+#include <aws/http/private/connection_manager_system_vtable.h>
+#include <aws/http/private/connection_monitor.h>
+#include <aws/http/private/http_impl.h>
+#include <aws/http/private/proxy_impl.h>
+
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/event_loop.h>
+#include <aws/io/logging.h>
+#include <aws/io/socket.h>
+#include <aws/io/tls_channel_handler.h>
+#include <aws/io/uri.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/linked_list.h>
+#include <aws/common/mutex.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/string.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4232) /* function pointer to dll symbol */
+#endif
+
+/*
+ * Established connections not currently in use are tracked via this structure.
+ */
+struct aws_idle_connection {
+    struct aws_allocator *allocator;       /* allocator this node was allocated from (used to free it) */
+    struct aws_linked_list_node node;      /* membership in the manager's idle_connections LIFO list */
+    uint64_t cull_timestamp;               /* monotonic time at which this connection becomes eligible for culling */
+    struct aws_http_connection *connection; /* the pooled, ready-to-vend connection */
+};
+
+/*
+ * System vtable to use under normal circumstances; tests substitute their own
+ * via aws_http_connection_manager_set_system_vtable to mock the http layer.
+ */
+static struct aws_http_connection_manager_system_vtable s_default_system_vtable = {
+    .create_connection = aws_http_client_connect,
+    .release_connection = aws_http_connection_release,
+    .close_connection = aws_http_connection_close,
+    .is_connection_available = aws_http_connection_new_requests_allowed,
+    .get_monotonic_time = aws_high_res_clock_get_ticks,
+    .is_callers_thread = aws_channel_thread_is_callers_thread,
+    .connection_get_channel = aws_http_connection_get_channel,
+    .connection_get_version = aws_http_connection_get_version,
+};
+
+const struct aws_http_connection_manager_system_vtable *g_aws_http_connection_manager_default_system_vtable_ptr =
+    &s_default_system_vtable;
+
+/* Sanity-checks that the core function pointers of a (possibly test-supplied) vtable are set.
+ * NOTE(review): get_monotonic_time, is_callers_thread, connection_get_channel and
+ * connection_get_version are used elsewhere in this file but are not validated here —
+ * confirm whether that is intentional (e.g. optional for some test vtables). */
+bool aws_http_connection_manager_system_vtable_is_valid(const struct aws_http_connection_manager_system_vtable *table) {
+    return table->create_connection && table->close_connection && table->release_connection &&
+           table->is_connection_available;
+}
+
+/* Lifecycle states of the manager; see the "Lifecycle" commentary below. */
+enum aws_http_connection_manager_state_type { AWS_HCMST_UNINITIALIZED, AWS_HCMST_READY, AWS_HCMST_SHUTTING_DOWN };
+
+/*
+ * Indices into the manager's internal_ref[] counter array.
+ *
+ * AWS_HCMCT_VENDED_CONNECTION: The number of connections currently being used by external users.
+ * AWS_HCMCT_PENDING_CONNECTIONS: The number of pending new connection requests we have outstanding to the http
+ * layer.
+ * AWS_HCMCT_OPEN_CONNECTION: Always equal to # of connection shutdown callbacks not yet invoked
+ * or equivalently:
+ *
+ * # of connections ever created by the manager - # shutdown callbacks received
+ */
+enum aws_http_connection_manager_count_type {
+    AWS_HCMCT_VENDED_CONNECTION,
+    AWS_HCMCT_PENDING_CONNECTIONS,
+    AWS_HCMCT_OPEN_CONNECTION,
+    AWS_HCMCT_COUNT, /* sentinel: number of counter types, sizes internal_ref[] */
+};
+
+/**
+ * Vocabulary
+ * Acquisition - a request by a user for a connection
+ * Pending Acquisition - a request by a user for a new connection that has not been completed. It may be
+ * waiting on http, a release by another user, or the manager itself.
+ * Pending Connect - a request to the http layer for a new connection that has not been resolved yet
+ * Vended Connection - a successfully established connection that is currently in use by something; must
+ * be released (through the connection manager) by the user before anyone else can use it. The connection
+ * manager does not explicitly track vended connections.
+ * Task Set - A set of operations that should be attempted once the lock is released. A task set includes
+ * completion callbacks (which can't fail) and connection attempts (which can fail either immediately or
+ * asynchronously).
+ *
+ * Requirements/Assumptions
+ * (1) Don't invoke user callbacks while holding the internal state lock
+ * (2) Don't invoke downstream http calls while holding the internal state lock
+ * (3) Only log unusual or rare events while the lock is held. Common-path logging should be while it is
+ * not held.
+ * (4) Don't crash or do awful things (leaking resources is ok though) if the interface contract
+ * (ref counting + balanced acquire/release of connections) is violated by the user
+ *
+ * In order to fulfill (1) and (2), all side-effecting operations within the connection manager follow a pattern:
+ *
+ * (1) Lock
+ * (2) Make state changes based on the operation
+ * (3) Build a set of work (completions, connect calls, releases, self-destruction) as appropriate to the operation
+ * (4) Unlock
+ * (5) Execute the task set
+ *
+ * Asynchronous work order failures are handled in the async callback, but immediate failures require
+ * us to relock and update the internal state. When there's an immediate connect failure, we use a
+ * conservative policy to fail all excess (beyond the # of pending connects) acquisitions; this allows us
+ * to avoid a possible recursive invocation (and potential failures) to connect again.
+ *
+ * Lifecycle
+ * Our connection manager implementation has a reasonably complex lifecycle.
+ *
+ * All state around the life cycle is protected by a lock. It seemed too risky and error-prone
+ * to try and mix an atomic ref count with the internal tracking counters we need.
+ *
+ * Over the course of its lifetime, a connection manager moves through two states:
+ *
+ * READY - connections may be acquired and released. When the external ref count for the manager
+ * drops to zero, the manager moves to:
+ *
+ * TODO: Seems like connections can still be released while shutting down.
+ * SHUTTING_DOWN - connections may no longer be acquired and released (how could they if the external
+ * ref count was accurate?) but in case of user ref errors, we simply fail attempts to do so rather
+ * than crash or underflow. While in this state, we wait for a set of tracking counters to all fall to zero:
+ *
+ * pending_connect_count - the # of unresolved calls to the http layer's connect logic
+ * open_connection_count - the # of connections for whom the shutdown callback (from http) has not been invoked
+ * vended_connection_count - the # of connections held by external users that haven't been released. Under correct
+ * usage this should be zero before SHUTTING_DOWN is entered, but we attempt to handle incorrect usage gracefully.
+ *
+ * Once all the counters fall to zero and no lifecycle transition is outstanding, the connection manager will
+ * destroy itself.
+ *
+ * While shutting down, as pending connects resolve, we immediately release new incoming (from http) connections
+ *
+ * During the transition from READY to SHUTTING_DOWN, we flush the pending acquisition queue (with failure callbacks)
+ * and since we disallow new acquires, pending_acquisition_count should always be zero after the transition.
+ *
+ */
+struct aws_http_connection_manager {
+    struct aws_allocator *allocator;
+
+    /*
+     * A union of external downstream dependencies (primarily global http API functions) and
+     * internal implementation references. Selectively overridden by tests in order to
+     * enable strong coverage of internal implementation details.
+     */
+    const struct aws_http_connection_manager_system_vtable *system_vtable;
+
+    /*
+     * Callback to invoke when shutdown has completed and all resources have been cleaned up.
+     */
+    aws_http_connection_manager_shutdown_complete_fn *shutdown_complete_callback;
+
+    /*
+     * User data to pass to the shutdown completion callback.
+     */
+    void *shutdown_complete_user_data;
+
+    /*
+     * Controls access to all mutable state on the connection manager
+     */
+    struct aws_mutex lock;
+
+    /*
+     * A manager can be in one of two states, READY or SHUTTING_DOWN. The state transition
+     * takes place when ref_count drops to zero.
+     */
+    enum aws_http_connection_manager_state_type state;
+
+    /*
+     * The number of all established, idle connections, cached so
+     * that we don't have to compute the size of a linked list every time.
+     * It doesn't contribute to internal refcount as AWS_HCMCT_OPEN_CONNECTION includes all idle connections as well.
+     */
+    size_t idle_connection_count;
+
+    /*
+     * The set of all available, ready-to-be-used connections, as aws_idle_connection structs.
+     *
+     * This must be a LIFO stack. When connections are released by the user, they must be added on to the back.
+     * When we vend connections to the user, they must be removed from the back first.
+     * In this way, the list will always be sorted from oldest (in terms of time spent idle) to newest. This means
+     * we can always use the cull timestamp of the front connection as the next scheduled time for culling.
+     * It also means that when we cull connections, we can quit the loop as soon as we find a connection
+     * whose timestamp is greater than the current timestamp.
+     */
+    struct aws_linked_list idle_connections;
+
+    /*
+     * The set of all incomplete connection acquisition requests
+     */
+    struct aws_linked_list pending_acquisitions;
+
+    /*
+     * The number of all incomplete connection acquisition requests, cached so
+     * that we don't have to compute the size of a linked list every time.
+     */
+    size_t pending_acquisition_count;
+
+    /*
+     * Counts that contribute to the internal refcount.
+     * When a value changes, s_connection_manager_internal_ref_increase/decrease is needed.
+     *
+     * AWS_HCMCT_VENDED_CONNECTION: The number of connections currently being used by external users.
+     * AWS_HCMCT_PENDING_CONNECTIONS: The number of pending new connection requests we have outstanding to the http
+     * layer.
+     * AWS_HCMCT_OPEN_CONNECTION: Always equal to # of connection shutdown callbacks not yet invoked
+     * or equivalently:
+     *
+     * # of connections ever created by the manager - # shutdown callbacks received
+     */
+    size_t internal_ref[AWS_HCMCT_COUNT];
+
+    /*
+     * The number of established new HTTP/2 connections we have waiting for SETTINGS from the http layer
+     * It doesn't contribute to internal refcount as AWS_HCMCT_OPEN_CONNECTION includes all connections waiting for
+     * settings as well.
+     */
+    size_t pending_settings_count;
+
+    /*
+     * All the options needed to create an http connection
+     */
+    struct aws_client_bootstrap *bootstrap;
+    size_t initial_window_size;
+    struct aws_socket_options socket_options;
+    struct aws_tls_connection_options *tls_connection_options;
+    struct aws_http_proxy_config *proxy_config;
+    struct aws_http_connection_monitoring_options monitoring_options;
+    struct aws_string *host;
+    struct proxy_env_var_settings proxy_ev_settings;
+    struct aws_tls_connection_options *proxy_ev_tls_options;
+    uint16_t port;
+    /*
+     * HTTP/2 specific.
+     */
+    bool http2_prior_knowledge;
+    struct aws_array_list *initial_settings;
+    size_t max_closed_streams;
+    bool http2_conn_manual_window_management;
+
+    /*
+     * The maximum number of connections this manager should ever have at once.
+     */
+    size_t max_connections;
+
+    /*
+     * Lifecycle tracking for the connection manager. Starts at 1.
+     *
+     * Once this drops to zero, the manager state transitions to shutting down
+     *
+     * The manager is deleted when all other tracking counters have returned to zero.
+     *
+     * We don't use an atomic here because the shutdown phase wants to check many different
+     * values. You could argue that we could use a sum of everything, but we still need the
+     * individual values for proper behavior and error checking during the ready state. Also,
+     * a hybrid atomic/lock solution felt excessively complicated and delicate.
+     */
+    size_t external_ref_count;
+
+    /*
+     * Internal refcount that keeps connection manager alive.
+     *
+     * It's a sum of all internal_ref, the `struct aws_connection_management_transaction` alive and one for any external
+     * usage.
+     *
+     * Once this refcount drops to zero, the connection manager either cleans up all its memory immediately or
+     * waits for the last scheduled task to clean up the memory, and does nothing else.
+     */
+    struct aws_ref_count internal_ref_count;
+
+    /*
+     * if set to true, read back pressure mechanism will be enabled.
+     */
+    bool enable_read_back_pressure;
+
+    /**
+     * If set to a non-zero value, then connections that stay in the pool longer than the specified
+     * timeout will be closed automatically.
+     */
+    uint64_t max_connection_idle_in_milliseconds;
+
+    /*
+     * Task to cull idle connections. This task is run periodically on the cull_event_loop if a non-zero
+     * culling time interval is specified.
+     */
+    struct aws_task *cull_task;
+    struct aws_event_loop *cull_event_loop;
+};
+
+/* Point-in-time copy of the manager's counters, taken under the lock and
+ * logged after the lock is released (see requirement (3) above). */
+struct aws_http_connection_manager_snapshot {
+    enum aws_http_connection_manager_state_type state;
+
+    size_t idle_connection_count;
+    size_t pending_acquisition_count;
+    size_t pending_settings_count;
+
+    /* From internal_ref */
+    size_t pending_connects_count;
+    size_t vended_connection_count;
+    size_t open_connection_count;
+
+    size_t external_ref_count;
+};
+
+/*
+ * Copies the manager's counters into `snapshot`. Must be called with the
+ * manager's lock held (reads mutable state).
+ * Correct usage requires AWS_ZERO_STRUCT to have been called beforehand.
+ */
+static void s_aws_http_connection_manager_get_snapshot(
+    struct aws_http_connection_manager *manager,
+    struct aws_http_connection_manager_snapshot *snapshot) {
+
+    snapshot->state = manager->state;
+    snapshot->idle_connection_count = manager->idle_connection_count;
+    snapshot->pending_acquisition_count = manager->pending_acquisition_count;
+    snapshot->pending_settings_count = manager->pending_settings_count;
+
+    snapshot->pending_connects_count = manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS];
+    snapshot->vended_connection_count = manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION];
+    snapshot->open_connection_count = manager->internal_ref[AWS_HCMCT_OPEN_CONNECTION];
+
+    snapshot->external_ref_count = manager->external_ref_count;
+}
+
+/* Logs a previously-taken snapshot at DEBUG level. Safe to call without the
+ * lock; an UNINITIALIZED state means the snapshot was never filled in by the
+ * control flow, which is logged as such rather than printing garbage. */
+static void s_aws_http_connection_manager_log_snapshot(
+    struct aws_http_connection_manager *manager,
+    struct aws_http_connection_manager_snapshot *snapshot) {
+    if (snapshot->state != AWS_HCMST_UNINITIALIZED) {
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: snapshot - state=%d, idle_connection_count=%zu, pending_acquire_count=%zu, "
+            "pending_settings_count=%zu, pending_connect_count=%zu, vended_connection_count=%zu, "
+            "open_connection_count=%zu, ref_count=%zu",
+            (void *)manager,
+            (int)snapshot->state,
+            snapshot->idle_connection_count,
+            snapshot->pending_acquisition_count,
+            snapshot->pending_settings_count,
+            snapshot->pending_connects_count,
+            snapshot->vended_connection_count,
+            snapshot->open_connection_count,
+            snapshot->external_ref_count);
+    } else {
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: snapshot not initialized by control flow", (void *)manager);
+    }
+}
+
+/* Test hook: replaces the manager's system vtable (mocking the http layer).
+ * Fatally asserts that the supplied vtable passes the validity check. */
+void aws_http_connection_manager_set_system_vtable(
+    struct aws_http_connection_manager *manager,
+    const struct aws_http_connection_manager_system_vtable *system_vtable) {
+    AWS_FATAL_ASSERT(aws_http_connection_manager_system_vtable_is_valid(system_vtable));
+
+    manager->system_vtable = system_vtable;
+}
+
+/*
+ * A struct that functions as both the pending acquisition tracker and the about-to-complete data.
+ *
+ * The list in the connection manager (pending_acquisitions) is the set of all acquisition requests that we
+ * haven't yet resolved.
+ *
+ * In order to make sure we never invoke callbacks while holding the manager's lock, in a number of places
+ * we build a list of one or more acquisitions to complete. Once the lock is released
+ * we complete all the acquisitions in the list using the data within the struct (hence why we have
+ * "result-oriented" members like connection and error_code). This means we can fail an acquisition
+ * simply by setting the error_code and moving it to the current transaction's completion list.
+ */
+struct aws_http_connection_acquisition {
+    struct aws_allocator *allocator;
+    struct aws_linked_list_node node;
+    struct aws_http_connection_manager *manager; /* Only used by logging */
+    aws_http_connection_manager_on_connection_setup_fn *callback;
+    void *user_data;
+    struct aws_http_connection *connection; /* non-null on success, null on failure */
+    int error_code;                         /* AWS_OP_SUCCESS or the failure diagnostic */
+    struct aws_channel_task acquisition_task; /* used to complete the callback on the connection's channel thread */
+};
+
+/* Channel task that invokes an acquisition's completion callback on the
+ * connection's own channel thread, then frees the acquisition struct. */
+static void s_connection_acquisition_task(
+    struct aws_channel_task *channel_task,
+    void *arg,
+    enum aws_task_status status) {
+    (void)channel_task;
+
+    struct aws_http_connection_acquisition *pending_acquisition = arg;
+
+    /* this is a channel task. If it is canceled, that means the channel shutdown. In that case, that's equivalent
+     * to a closed connection. */
+    if (status != AWS_TASK_STATUS_RUN_READY) {
+        AWS_LOGF_WARN(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: Failed to complete connection acquisition because the connection was closed",
+            (void *)pending_acquisition->manager);
+        pending_acquisition->callback(NULL, AWS_ERROR_HTTP_CONNECTION_CLOSED, pending_acquisition->user_data);
+        /* release it back to prevent a leak of the connection count. */
+        aws_http_connection_manager_release_connection(pending_acquisition->manager, pending_acquisition->connection);
+    } else {
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: Successfully completed connection acquisition with connection id=%p",
+            (void *)pending_acquisition->manager,
+            (void *)pending_acquisition->connection);
+        pending_acquisition->callback(
+            pending_acquisition->connection, pending_acquisition->error_code, pending_acquisition->user_data);
+    }
+
+    aws_mem_release(pending_acquisition->allocator, pending_acquisition);
+}
+
+/*
+ * Invokes a set of connection acquisition completion callbacks.
+ *
+ * Soft Requirement: The manager's lock must not be held in the callstack.
+ *
+ * Assumes that internal state (like pending_acquisition_count, vended_connection_count, etc...) have already been
+ * updated according to the list's contents.
+ */
+static void s_aws_http_connection_manager_complete_acquisitions(
+    struct aws_linked_list *acquisitions,
+    struct aws_allocator *allocator) {
+
+    while (!aws_linked_list_empty(acquisitions)) {
+        struct aws_linked_list_node *node = aws_linked_list_pop_front(acquisitions);
+        struct aws_http_connection_acquisition *pending_acquisition =
+            AWS_CONTAINER_OF(node, struct aws_http_connection_acquisition, node);
+
+        if (pending_acquisition->error_code == AWS_OP_SUCCESS) {
+
+            struct aws_channel *channel =
+                pending_acquisition->manager->system_vtable->connection_get_channel(pending_acquisition->connection);
+            AWS_PRECONDITION(channel);
+
+            /* For some workloads, going ahead and moving the connection callback to the connection's thread is a
+             * substantial performance improvement so let's do that */
+            if (!pending_acquisition->manager->system_vtable->is_callers_thread(channel)) {
+                aws_channel_task_init(
+                    &pending_acquisition->acquisition_task,
+                    s_connection_acquisition_task,
+                    pending_acquisition,
+                    "s_connection_acquisition_task");
+                aws_channel_schedule_task_now(channel, &pending_acquisition->acquisition_task);
+                /* NOTE(review): this `return` exits the whole completion loop, so any
+                 * acquisitions still in `acquisitions` are not completed by this call.
+                 * Confirm the caller tolerates leftovers, or whether `continue` was
+                 * intended here. */
+                return;
+            }
+            AWS_LOGF_DEBUG(
+                AWS_LS_HTTP_CONNECTION_MANAGER,
+                "id=%p: Successfully completed connection acquisition with connection id=%p",
+                (void *)pending_acquisition->manager,
+                (void *)pending_acquisition->connection);
+
+        } else {
+            AWS_LOGF_WARN(
+                AWS_LS_HTTP_CONNECTION_MANAGER,
+                "id=%p: Failed to complete connection acquisition with error_code %d(%s)",
+                (void *)pending_acquisition->manager,
+                pending_acquisition->error_code,
+                aws_error_str(pending_acquisition->error_code));
+        }
+
+        /* Same-thread (or failure) path: invoke the callback inline and free the tracker. */
+        pending_acquisition->callback(
+            pending_acquisition->connection, pending_acquisition->error_code, pending_acquisition->user_data);
+        aws_mem_release(allocator, pending_acquisition);
+    }
+}
+
+/*
+ * Moves the first pending connection acquisition into a (task set) list. Call this while holding the lock to
+ * build the set of callbacks to be completed once the lock is released.
+ *
+ * Hard Requirement: Manager's lock must held somewhere in the call stack
+ *
+ * If this was a successful acquisition then connection is non-null
+ * If this was a failed acquisition then connection is null and error_code is hopefully a useful diagnostic (extreme
+ * edge cases exist where it may not be though)
+ */
+static void s_aws_http_connection_manager_move_front_acquisition(
+    struct aws_http_connection_manager *manager,
+    struct aws_http_connection *connection,
+    int error_code,
+    struct aws_linked_list *output_list) {
+
+    AWS_FATAL_ASSERT(!aws_linked_list_empty(&manager->pending_acquisitions));
+    struct aws_linked_list_node *node = aws_linked_list_pop_front(&manager->pending_acquisitions);
+
+    AWS_FATAL_ASSERT(manager->pending_acquisition_count > 0);
+    --manager->pending_acquisition_count;
+
+    /* Defensive: a "success" with no connection would violate the contract above; convert it
+     * into an error rather than handing callers a NULL connection with no diagnostic. */
+    if (error_code == AWS_ERROR_SUCCESS && connection == NULL) {
+        AWS_LOGF_FATAL(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: Connection acquisition completed with NULL connection and no error code. Investigate.",
+            (void *)manager);
+        error_code = AWS_ERROR_UNKNOWN;
+    }
+
+    struct aws_http_connection_acquisition *pending_acquisition =
+        AWS_CONTAINER_OF(node, struct aws_http_connection_acquisition, node);
+    pending_acquisition->connection = connection;
+    pending_acquisition->error_code = error_code;
+
+    aws_linked_list_push_back(output_list, node);
+}
+
+/*
+ * Encompasses all of the external operations that need to be done for various
+ * events:
+ *   manager release
+ *   connection release
+ *   connection acquire
+ *   connection_setup
+ *   connection_shutdown
+ *
+ * The transaction is built under the manager's lock (and the internal state is updated optimistically),
+ * but then executed outside of it.
+ */
+struct aws_connection_management_transaction {
+    struct aws_http_connection_manager *manager;
+    struct aws_allocator *allocator;
+    struct aws_linked_list completions;              /* <struct aws_http_connection_acquisition> callbacks to fire */
+    struct aws_http_connection *connection_to_release; /* single connection to hand back to the http layer, or NULL */
+    struct aws_linked_list connections_to_release; /* <struct aws_idle_connection> */
+    struct aws_http_connection_manager_snapshot snapshot; /* counters captured under the lock, logged afterwards */
+    size_t new_connections;                          /* # of new connect calls to issue once unlocked */
+};
+
+/* Initializes a transaction and takes an internal ref on the manager so it
+ * cannot be destroyed while the transaction is in flight. Call under the lock. */
+static void s_aws_connection_management_transaction_init(
+    struct aws_connection_management_transaction *work,
+    struct aws_http_connection_manager *manager) {
+    AWS_ZERO_STRUCT(*work);
+
+    aws_linked_list_init(&work->connections_to_release);
+    aws_linked_list_init(&work->completions);
+    work->manager = manager;
+    work->allocator = manager->allocator;
+    aws_ref_count_acquire(&manager->internal_ref_count);
+}
+
+/* Releases the transaction's internal ref on the manager. Both work lists must
+ * already have been drained by execution; this may drop the manager's internal
+ * refcount to zero and trigger destruction. */
+static void s_aws_connection_management_transaction_clean_up(struct aws_connection_management_transaction *work) {
+    AWS_FATAL_ASSERT(aws_linked_list_empty(&work->connections_to_release));
+    AWS_FATAL_ASSERT(aws_linked_list_empty(&work->completions));
+    AWS_ASSERT(work->manager);
+    aws_ref_count_release(&work->manager->internal_ref_count);
+}
+
+/* The count acquire and release both need to be invoked while holding the lock.
+ * Bumps the given counter by `num` and takes a matching number of internal refs. */
+static void s_connection_manager_internal_ref_increase(
+    struct aws_http_connection_manager *manager,
+    enum aws_http_connection_manager_count_type count_type,
+    size_t num) {
+
+    manager->internal_ref[count_type] += num;
+    for (size_t i = 0; i < num; i++) {
+        aws_ref_count_acquire(&manager->internal_ref_count);
+    }
+}
+
+/* Decrements the given counter by `num` and drops a matching number of internal refs.
+ * Must be invoked while holding the lock. */
+static void s_connection_manager_internal_ref_decrease(
+    struct aws_http_connection_manager *manager,
+    enum aws_http_connection_manager_count_type count_type,
+    size_t num) {
+
+    manager->internal_ref[count_type] -= num;
+    for (size_t i = 0; i < num; i++) {
+        /* This only happens between transaction init and transaction clean up. As a transaction always holds an
+         * internal refcount, this will never bring the refcount to zero */
+        aws_ref_count_release(&manager->internal_ref_count);
+    }
+}
+
+/* Only invoked with the lock held.
+ * Builds the out-of-lock work set for the current event: vends idle connections to
+ * waiting acquisitions, decides how many new connects to issue (READY), or flushes
+ * everything with failures (SHUTTING_DOWN). Finishes by snapshotting counters. */
+static void s_aws_http_connection_manager_build_transaction(struct aws_connection_management_transaction *work) {
+    struct aws_http_connection_manager *manager = work->manager;
+
+    if (manager->state == AWS_HCMST_READY) {
+        /*
+         * Step 1 - If there's free connections, complete acquisition requests
+         *
+         * NOTE(review): the "> 0" is redundant — !aws_linked_list_empty() is already
+         * 0 or 1, so the condition is simply "list not empty".
+         */
+        while (!aws_linked_list_empty(&manager->idle_connections) > 0 && manager->pending_acquisition_count > 0) {
+            AWS_FATAL_ASSERT(manager->idle_connection_count >= 1);
+            /*
+             * It is absolutely critical that this is pop_back and not front. By making the idle connections
+             * a LIFO stack, the list will always be sorted from oldest (in terms of idle time) to newest. This means
+             * we can always use the cull timestamp of the first connection as the next scheduled time for culling.
+             * It also means that when we cull connections, we can quit the loop as soon as we find a connection
+             * whose timestamp is greater than the current timestamp.
+             */
+            struct aws_linked_list_node *node = aws_linked_list_pop_back(&manager->idle_connections);
+            struct aws_idle_connection *idle_connection = AWS_CONTAINER_OF(node, struct aws_idle_connection, node);
+            struct aws_http_connection *connection = idle_connection->connection;
+
+            AWS_LOGF_DEBUG(
+                AWS_LS_HTTP_CONNECTION_MANAGER,
+                "id=%p: Grabbing pooled connection (%p)",
+                (void *)manager,
+                (void *)connection);
+            s_aws_http_connection_manager_move_front_acquisition(
+                manager, connection, AWS_ERROR_SUCCESS, &work->completions);
+            s_connection_manager_internal_ref_increase(manager, AWS_HCMCT_VENDED_CONNECTION, 1);
+            --manager->idle_connection_count;
+            aws_mem_release(idle_connection->allocator, idle_connection);
+        }
+
+        /*
+         * Step 2 - if there's excess pending acquisitions and we have room to make more, make more
+         */
+        if (manager->pending_acquisition_count >
+            manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] + manager->pending_settings_count) {
+            AWS_FATAL_ASSERT(
+                manager->max_connections >= manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION] +
+                                                manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] +
+                                                manager->pending_settings_count);
+
+            /* Want: one new connect per unserved acquisition; cap by the remaining connection budget. */
+            work->new_connections = manager->pending_acquisition_count -
+                                    manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] -
+                                    manager->pending_settings_count;
+            size_t max_new_connections =
+                manager->max_connections -
+                (manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION] +
+                 manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] + manager->pending_settings_count);
+
+            if (work->new_connections > max_new_connections) {
+                work->new_connections = max_new_connections;
+            }
+            s_connection_manager_internal_ref_increase(manager, AWS_HCMCT_PENDING_CONNECTIONS, work->new_connections);
+        }
+    } else {
+        /*
+         * swap our internal connection set with the empty work set
+         */
+        AWS_FATAL_ASSERT(aws_linked_list_empty(&work->connections_to_release));
+        aws_linked_list_swap_contents(&manager->idle_connections, &work->connections_to_release);
+        manager->idle_connection_count = 0;
+
+        /*
+         * Move all manager pending acquisitions to the work completion list
+         */
+        while (!aws_linked_list_empty(&manager->pending_acquisitions)) {
+            AWS_LOGF_DEBUG(
+                AWS_LS_HTTP_CONNECTION_MANAGER,
+                "id=%p: Failing pending connection acquisition due to manager shut down",
+                (void *)manager);
+            s_aws_http_connection_manager_move_front_acquisition(
+                manager, NULL, AWS_ERROR_HTTP_CONNECTION_MANAGER_SHUTTING_DOWN, &work->completions);
+        }
+
+        AWS_LOGF_INFO(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: manager release, failing %zu pending acquisitions",
+            (void *)manager,
+            manager->pending_acquisition_count);
+        manager->pending_acquisition_count = 0;
+    }
+
+    s_aws_http_connection_manager_get_snapshot(manager, &work->snapshot);
+}
+
+static void s_aws_http_connection_manager_execute_transaction(struct aws_connection_management_transaction *work);
+
+/*
+ * The final last gasp of a connection manager where memory is cleaned up. Destruction is split up into two parts,
+ * a begin and a finish. Idle connection culling requires a scheduled task on an arbitrary event loop. If idle
+ * connection culling is on then this task must be cancelled before destruction can finish, but you can only cancel
+ * a task from the same event loop that it is scheduled on. To resolve this, when using idle connection culling,
+ * we schedule a finish destruction task on the event loop that the culling task is on. This finish task
+ * cancels the culling task and then calls this function. If we are not using idle connection culling, we can
+ * call this function immediately from the start of destruction.
+ */
+static void s_aws_http_connection_manager_finish_destroy(struct aws_http_connection_manager *manager) {
+    if (manager == NULL) {
+        return;
+    }
+
+    AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Destroying self", (void *)manager);
+
+    /* By this point every tracking counter must have drained to zero; anything else is a logic error. */
+    AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] == 0);
+    AWS_FATAL_ASSERT(manager->pending_settings_count == 0);
+    AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION] == 0);
+    AWS_FATAL_ASSERT(manager->pending_acquisition_count == 0);
+    AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_OPEN_CONNECTION] == 0);
+    AWS_FATAL_ASSERT(aws_linked_list_empty(&manager->pending_acquisitions));
+    AWS_FATAL_ASSERT(aws_linked_list_empty(&manager->idle_connections));
+
+    aws_string_destroy(manager->host);
+    if (manager->initial_settings) {
+        aws_array_list_clean_up(manager->initial_settings);
+        aws_mem_release(manager->allocator, manager->initial_settings);
+    }
+    if (manager->tls_connection_options) {
+        aws_tls_connection_options_clean_up(manager->tls_connection_options);
+        aws_mem_release(manager->allocator, manager->tls_connection_options);
+    }
+    if (manager->proxy_ev_tls_options) {
+        aws_tls_connection_options_clean_up(manager->proxy_ev_tls_options);
+        aws_mem_release(manager->allocator, manager->proxy_ev_tls_options);
+    }
+    if (manager->proxy_config) {
+        aws_http_proxy_config_destroy(manager->proxy_config);
+    }
+
+    /*
+     * If this task exists then we are actually in the corresponding event loop running the final destruction task.
+     * In that case, we've already cancelled this task and when you cancel, it runs synchronously. So in that
+     * case the task has run as cancelled, it was not rescheduled, and so we can safely release the memory.
+     */
+    if (manager->cull_task) {
+        aws_mem_release(manager->allocator, manager->cull_task);
+    }
+
+    aws_mutex_clean_up(&manager->lock);
+
+    aws_client_bootstrap_release(manager->bootstrap);
+
+    /* Notify the user last, after all internal resources are gone. */
+    if (manager->shutdown_complete_callback) {
+        manager->shutdown_complete_callback(manager->shutdown_complete_user_data);
+    }
+
+    aws_mem_release(manager->allocator, manager);
+}
+
+/* This is scheduled to run on the cull task's event loop. Should only be scheduled to run if we have one.
+ * Cancels the cull task (legal only from its own event loop), frees this task, and
+ * drops the internal ref that the cull task was holding on the manager — which may
+ * trigger s_aws_http_connection_manager_finish_destroy via the refcount's zero callback. */
+static void s_final_destruction_task(struct aws_task *task, void *arg, enum aws_task_status status) {
+    (void)status;
+    struct aws_http_connection_manager *manager = arg;
+    struct aws_allocator *allocator = manager->allocator;
+
+    AWS_FATAL_ASSERT(manager->cull_task != NULL);
+    AWS_FATAL_ASSERT(manager->cull_event_loop != NULL);
+
+    aws_event_loop_cancel_task(manager->cull_event_loop, manager->cull_task);
+    aws_mem_release(allocator, task);
+
+    /* release the refcount on manager as the culling task will not run again */
+    aws_ref_count_release(&manager->internal_ref_count);
+}
+
+static void s_cull_task(struct aws_task *task, void *arg, enum aws_task_status status);
+
+/* Schedules (or re-schedules) the idle-connection culling task. No-op when idle
+ * culling is disabled (max_connection_idle_in_milliseconds == 0). Lazily creates
+ * the task (taking an internal ref so the manager outlives it) and picks an event
+ * loop on first use. The next run time is the cull timestamp of the oldest idle
+ * connection, or one full idle interval from now when the pool is empty. */
+static void s_schedule_connection_culling(struct aws_http_connection_manager *manager) {
+    if (manager->max_connection_idle_in_milliseconds == 0) {
+        return;
+    }
+
+    if (manager->cull_task == NULL) {
+        manager->cull_task = aws_mem_calloc(manager->allocator, 1, sizeof(struct aws_task));
+        aws_task_init(manager->cull_task, s_cull_task, manager, "cull_idle_connections");
+        /* For the task to properly run and cancel, we need to keep manager alive */
+        aws_ref_count_acquire(&manager->internal_ref_count);
+    }
+
+    if (manager->cull_event_loop == NULL) {
+        manager->cull_event_loop = aws_event_loop_group_get_next_loop(manager->bootstrap->event_loop_group);
+    }
+    AWS_FATAL_ASSERT(manager->cull_event_loop != NULL);
+
+    uint64_t cull_task_time = 0;
+
+    /* Only the idle list inspection needs the lock; scheduling happens after unlock. */
+    aws_mutex_lock(&manager->lock);
+    const struct aws_linked_list_node *end = aws_linked_list_end(&manager->idle_connections);
+    struct aws_linked_list_node *oldest_node = aws_linked_list_begin(&manager->idle_connections);
+    if (oldest_node != end) {
+        /*
+         * Since the connections are in LIFO order in the list, the front of the list has the closest
+         * cull time.
+         */
+        struct aws_idle_connection *oldest_idle_connection =
+            AWS_CONTAINER_OF(oldest_node, struct aws_idle_connection, node);
+        cull_task_time = oldest_idle_connection->cull_timestamp;
+    } else {
+        /*
+         * There are no connections in the list, so the absolute minimum anything could be culled is the full
+         * culling interval from now.
+         */
+        uint64_t now = 0;
+        manager->system_vtable->get_monotonic_time(&now);
+        cull_task_time =
+            now + aws_timestamp_convert(
+                      manager->max_connection_idle_in_milliseconds, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
+    }
+    aws_mutex_unlock(&manager->lock);
+
+    aws_event_loop_schedule_task_future(manager->cull_event_loop, manager->cull_task, cull_task_time);
+
+    return;
+}
+
+/* Allocates and initializes a connection manager from user options.
+ *
+ * Validates options up front (non-null socket options, max_connections > 0, and the
+ * mutual exclusion of TLS with HTTP/2 prior knowledge), then builds the manager. Any
+ * failure after allocation funnels through the on_error label into
+ * s_aws_http_connection_manager_finish_destroy, which tolerates partially-initialized
+ * state. Returns NULL with aws_last_error() set on failure. */
+struct aws_http_connection_manager *aws_http_connection_manager_new(
+    struct aws_allocator *allocator,
+    const struct aws_http_connection_manager_options *options) {
+
+    aws_http_fatal_assert_library_initialized();
+
+    if (!options) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION_MANAGER, "Invalid options - options is null");
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    if (!options->socket_options) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION_MANAGER, "Invalid options - socket_options is null");
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    if (options->max_connections == 0) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION_MANAGER, "Invalid options - max_connections cannot be 0");
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    if (options->tls_connection_options && options->http2_prior_knowledge) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION_MANAGER, "Invalid options - HTTP/2 prior knowledge cannot be set when TLS is used");
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    struct aws_http_connection_manager *manager =
+        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_connection_manager));
+    if (manager == NULL) {
+        return NULL;
+    }
+
+    manager->allocator = allocator;
+
+    if (aws_mutex_init(&manager->lock)) {
+        goto on_error;
+    }
+
+    /* The internal refcount covers open connections and scheduled tasks; when it hits
+     * zero the manager is finally destroyed. */
+    aws_ref_count_init(
+        &manager->internal_ref_count,
+        manager,
+        (aws_simple_completion_callback *)s_aws_http_connection_manager_finish_destroy);
+
+    aws_linked_list_init(&manager->idle_connections);
+    aws_linked_list_init(&manager->pending_acquisitions);
+
+    manager->host = aws_string_new_from_cursor(allocator, &options->host);
+    if (manager->host == NULL) {
+        goto on_error;
+    }
+
+    /* Deep-copy TLS/proxy configuration so the caller's options need not outlive us. */
+    if (options->tls_connection_options) {
+        manager->tls_connection_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options));
+        if (aws_tls_connection_options_copy(manager->tls_connection_options, options->tls_connection_options)) {
+            goto on_error;
+        }
+    }
+    if (options->proxy_options) {
+        manager->proxy_config = aws_http_proxy_config_new_from_manager_options(allocator, options);
+        if (manager->proxy_config == NULL) {
+            goto on_error;
+        }
+    }
+
+    if (options->monitoring_options) {
+        manager->monitoring_options = *options->monitoring_options;
+    }
+
+    manager->state = AWS_HCMST_READY;
+    manager->initial_window_size = options->initial_window_size;
+    manager->port = options->port;
+    manager->max_connections = options->max_connections;
+    manager->socket_options = *options->socket_options;
+    manager->bootstrap = aws_client_bootstrap_acquire(options->bootstrap);
+    manager->system_vtable = g_aws_http_connection_manager_default_system_vtable_ptr;
+    manager->external_ref_count = 1;
+    manager->shutdown_complete_callback = options->shutdown_complete_callback;
+    manager->shutdown_complete_user_data = options->shutdown_complete_user_data;
+    manager->enable_read_back_pressure = options->enable_read_back_pressure;
+    manager->max_connection_idle_in_milliseconds = options->max_connection_idle_in_milliseconds;
+    if (options->proxy_ev_settings) {
+        manager->proxy_ev_settings = *options->proxy_ev_settings;
+    }
+    if (manager->proxy_ev_settings.tls_options) {
+        manager->proxy_ev_tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options));
+        if (aws_tls_connection_options_copy(manager->proxy_ev_tls_options, manager->proxy_ev_settings.tls_options)) {
+            goto on_error;
+        }
+        manager->proxy_ev_settings.tls_options = manager->proxy_ev_tls_options;
+    }
+    manager->http2_prior_knowledge = options->http2_prior_knowledge;
+    if (options->num_initial_settings > 0) {
+        manager->initial_settings = aws_mem_calloc(allocator, 1, sizeof(struct aws_array_list));
+        aws_array_list_init_dynamic(
+            manager->initial_settings, allocator, options->num_initial_settings, sizeof(struct aws_http2_setting));
+        /* Copy the settings with push_back so the list's length is updated. A raw memcpy
+         * into the backing buffer leaves aws_array_list_length() reporting 0, and
+         * s_aws_http_connection_manager_new_connection would then send no initial
+         * settings at all. push_back cannot fail here: capacity was preallocated above. */
+        for (size_t i = 0; i < options->num_initial_settings; ++i) {
+            aws_array_list_push_back(manager->initial_settings, &options->initial_settings_array[i]);
+        }
+    }
+    manager->max_closed_streams = options->max_closed_streams;
+    manager->http2_conn_manual_window_management = options->http2_conn_manual_window_management;
+
+    /* NOTHING can fail after here */
+    s_schedule_connection_culling(manager);
+
+    AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Successfully created", (void *)manager);
+
+    return manager;
+
+on_error:
+
+    s_aws_http_connection_manager_finish_destroy(manager);
+
+    return NULL;
+}
+
+/* Takes an external (user-facing) reference on the manager. */
+void aws_http_connection_manager_acquire(struct aws_http_connection_manager *manager) {
+    aws_mutex_lock(&manager->lock);
+    /* Acquiring after the external count already reached zero would resurrect a
+     * manager that has begun shutting down - treat it as a fatal caller bug. */
+    AWS_FATAL_ASSERT(manager->external_ref_count > 0);
+    ++manager->external_ref_count;
+    aws_mutex_unlock(&manager->lock);
+}
+
+/* Drops an external (user) reference. When the external count reaches zero the manager
+ * flips to AWS_HCMST_SHUTTING_DOWN, queues every idle connection for release via the
+ * transaction, arranges cancellation of the cull task on its event loop, and drops the
+ * internal ref taken at construction; actual destruction waits until all remaining
+ * internal refs (open connections, scheduled tasks) are gone. */
+void aws_http_connection_manager_release(struct aws_http_connection_manager *manager) {
+    struct aws_connection_management_transaction work;
+    s_aws_connection_management_transaction_init(&work, manager);
+
+    AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: release", (void *)manager);
+
+    aws_mutex_lock(&manager->lock);
+
+    if (manager->external_ref_count > 0) {
+        manager->external_ref_count -= 1;
+
+        if (manager->external_ref_count == 0) {
+            AWS_LOGF_INFO(
+                AWS_LS_HTTP_CONNECTION_MANAGER,
+                "id=%p: ref count now zero, starting shut down process",
+                (void *)manager);
+            manager->state = AWS_HCMST_SHUTTING_DOWN;
+            s_aws_http_connection_manager_build_transaction(&work);
+            if (manager->cull_task != NULL) {
+                /* When manager shutting down, schedule the task to cancel the cull task if exist. */
+                AWS_FATAL_ASSERT(manager->cull_event_loop);
+                struct aws_task *final_destruction_task =
+                    aws_mem_calloc(manager->allocator, 1, sizeof(struct aws_task));
+                aws_task_init(final_destruction_task, s_final_destruction_task, manager, "final_scheduled_destruction");
+                aws_event_loop_schedule_task_now(manager->cull_event_loop, final_destruction_task);
+            }
+            aws_ref_count_release(&manager->internal_ref_count);
+        }
+    } else {
+        /* Over-release by the caller; log loudly instead of underflowing the count. */
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: Connection manager release called with a zero reference count",
+            (void *)manager);
+    }
+
+    aws_mutex_unlock(&manager->lock);
+
+    /* Perform the releases/callbacks gathered above outside the lock. */
+    s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+/* Forward declarations for the http-layer and HTTP/2 callbacks wired into new
+ * connections by s_aws_http_connection_manager_new_connection below. */
+static void s_aws_http_connection_manager_on_connection_setup(
+    struct aws_http_connection *connection,
+    int error_code,
+    void *user_data);
+
+static void s_aws_http_connection_manager_on_connection_shutdown(
+    struct aws_http_connection *connection,
+    int error_code,
+    void *user_data);
+
+static void s_aws_http_connection_manager_h2_on_goaway_received(
+    struct aws_http_connection *http2_connection,
+    uint32_t last_stream_id,
+    uint32_t http2_error_code,
+    struct aws_byte_cursor debug_data,
+    void *user_data);
+
+static void s_aws_http_connection_manager_h2_on_initial_settings_completed(
+    struct aws_http_connection *http2_connection,
+    int error_code,
+    void *user_data);
+
+/* Asks the http layer for one brand-new connection on behalf of this manager.
+ * Success only means the asynchronous request was issued; the actual result arrives
+ * later through s_aws_http_connection_manager_on_connection_setup. Returns AWS_OP_ERR
+ * if the request itself could not be made. */
+static int s_aws_http_connection_manager_new_connection(struct aws_http_connection_manager *manager) {
+    /* HTTP/2-specific knobs, referenced by pointer from the main options below. */
+    struct aws_http2_connection_options http2_opts;
+    AWS_ZERO_STRUCT(http2_opts);
+    if (manager->initial_settings) {
+        http2_opts.initial_settings_array = manager->initial_settings->data;
+        http2_opts.num_initial_settings = aws_array_list_length(manager->initial_settings);
+    }
+    http2_opts.max_closed_streams = manager->max_closed_streams;
+    http2_opts.conn_manual_window_management = manager->http2_conn_manual_window_management;
+    /* The initial_settings_completed invoked after the other side acknowledges it, and will always be invoked if the
+     * connection set up */
+    http2_opts.on_initial_settings_completed = s_aws_http_connection_manager_h2_on_initial_settings_completed;
+    http2_opts.on_goaway_received = s_aws_http_connection_manager_h2_on_goaway_received;
+
+    struct aws_http_client_connection_options connect_opts;
+    AWS_ZERO_STRUCT(connect_opts);
+    connect_opts.self_size = sizeof(struct aws_http_client_connection_options);
+    connect_opts.allocator = manager->allocator;
+    connect_opts.bootstrap = manager->bootstrap;
+    connect_opts.host_name = aws_byte_cursor_from_string(manager->host);
+    connect_opts.port = manager->port;
+    connect_opts.socket_options = &manager->socket_options;
+    connect_opts.tls_options = manager->tls_connection_options;
+    connect_opts.initial_window_size = manager->initial_window_size;
+    connect_opts.user_data = manager;
+    connect_opts.on_setup = s_aws_http_connection_manager_on_connection_setup;
+    connect_opts.on_shutdown = s_aws_http_connection_manager_on_connection_shutdown;
+    connect_opts.manual_window_management = manager->enable_read_back_pressure;
+    connect_opts.proxy_ev_settings = &manager->proxy_ev_settings;
+    connect_opts.prior_knowledge_http2 = manager->http2_prior_knowledge;
+    connect_opts.http2_options = &http2_opts;
+
+    if (aws_http_connection_monitoring_options_is_valid(&manager->monitoring_options)) {
+        connect_opts.monitoring_options = &manager->monitoring_options;
+    }
+
+    struct aws_http_proxy_options proxy_opts;
+    AWS_ZERO_STRUCT(proxy_opts);
+    if (manager->proxy_config) {
+        aws_http_proxy_options_init_from_config(&proxy_opts, manager->proxy_config);
+        connect_opts.proxy_options = &proxy_opts;
+    }
+
+    /* create_connection goes through the system vtable so tests can mock it. */
+    if (manager->system_vtable->create_connection(&connect_opts)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: http connection creation failed with error code %d(%s)",
+            (void *)manager,
+            aws_last_error(),
+            aws_error_str(aws_last_error()));
+        return AWS_OP_ERR;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Executes the side effects gathered in a transaction, with the manager's lock NOT held:
+ *   1. log the state snapshot taken under the lock,
+ *   2. release connections queued for release,
+ *   3. request the new connections the transaction asked for,
+ *   4. run acquisition completion callbacks,
+ *   5. clean up the transaction (which may finish destroying the manager).
+ * If any new-connection request fails synchronously, re-acquires the lock to fail the
+ * acquisitions that no longer have a pending connection to satisfy them. */
+static void s_aws_http_connection_manager_execute_transaction(struct aws_connection_management_transaction *work) {
+
+    struct aws_http_connection_manager *manager = work->manager;
+
+    int representative_error = 0;
+    size_t new_connection_failures = 0;
+
+    /*
+     * Step 1 - Logging
+     */
+    s_aws_http_connection_manager_log_snapshot(manager, &work->snapshot);
+
+    /*
+     * Step 2 - Perform any requested connection releases
+     */
+    while (!aws_linked_list_empty(&work->connections_to_release)) {
+        struct aws_linked_list_node *node = aws_linked_list_pop_back(&work->connections_to_release);
+        struct aws_idle_connection *idle_connection = AWS_CONTAINER_OF(node, struct aws_idle_connection, node);
+
+        AWS_LOGF_INFO(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: Releasing connection (id=%p)",
+            (void *)manager,
+            (void *)idle_connection->connection);
+        manager->system_vtable->release_connection(idle_connection->connection);
+        aws_mem_release(idle_connection->allocator, idle_connection);
+    }
+
+    if (work->connection_to_release) {
+        AWS_LOGF_INFO(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: Releasing connection (id=%p)",
+            (void *)manager,
+            (void *)work->connection_to_release);
+        manager->system_vtable->release_connection(work->connection_to_release);
+    }
+
+    /*
+     * Step 3 - Make new connections
+     */
+    struct aws_array_list errors;
+    AWS_ZERO_STRUCT(errors);
+    /* Even if we can't init this array, we still need to invoke error callbacks properly */
+    bool push_errors = false;
+
+    if (work->new_connections > 0) {
+        AWS_LOGF_INFO(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: Requesting %zu new connections from http",
+            (void *)manager,
+            work->new_connections);
+        /* aws_array_list_init_dynamic returns an operation result, so compare against
+         * AWS_OP_SUCCESS (previously AWS_ERROR_SUCCESS - value-equal, but the wrong
+         * constant family for an op return). */
+        push_errors = aws_array_list_init_dynamic(&errors, work->allocator, work->new_connections, sizeof(int)) ==
+                      AWS_OP_SUCCESS;
+    }
+
+    for (size_t i = 0; i < work->new_connections; ++i) {
+        if (s_aws_http_connection_manager_new_connection(manager)) {
+            ++new_connection_failures;
+            representative_error = aws_last_error();
+            if (push_errors) {
+                AWS_FATAL_ASSERT(aws_array_list_push_back(&errors, &representative_error) == AWS_OP_SUCCESS);
+            }
+        }
+    }
+
+    if (new_connection_failures > 0) {
+        /*
+         * We failed and aren't going to receive a callback, but the current state assumes we will receive
+         * a callback. So we need to re-lock and update the state ourselves.
+         */
+        aws_mutex_lock(&manager->lock);
+
+        AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] >= new_connection_failures);
+        s_connection_manager_internal_ref_decrease(manager, AWS_HCMCT_PENDING_CONNECTIONS, new_connection_failures);
+
+        /*
+         * Rather than failing one acquisition for each connection failure, if there's at least one
+         * connection failure, we instead fail all excess acquisitions, since there's no pending
+         * connect that will necessarily resolve them.
+         *
+         * Try to correspond an error with the acquisition failure, but as a fallback just use the
+         * representative error.
+         */
+        size_t i = 0;
+        while (manager->pending_acquisition_count > manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS]) {
+            int error = representative_error;
+            if (i < aws_array_list_length(&errors)) {
+                aws_array_list_get_at(&errors, &error, i);
+            }
+
+            AWS_LOGF_DEBUG(
+                AWS_LS_HTTP_CONNECTION_MANAGER,
+                "id=%p: Failing excess connection acquisition with error code %d",
+                (void *)manager,
+                (int)error);
+            s_aws_http_connection_manager_move_front_acquisition(manager, NULL, error, &work->completions);
+            ++i;
+        }
+
+        aws_mutex_unlock(&manager->lock);
+    }
+
+    /*
+     * Step 4 - Perform acquisition callbacks
+     */
+    s_aws_http_connection_manager_complete_acquisitions(&work->completions, work->allocator);
+
+    aws_array_list_clean_up(&errors);
+
+    /*
+     * Step 5 - Clean up work. Do this here rather than at the end of every caller. Destroy the manager if necessary
+     */
+    s_aws_connection_management_transaction_clean_up(work);
+}
+
+/* Asynchronously acquires a connection for the caller. The request is queued on
+ * pending_acquisitions; s_aws_http_connection_manager_build_transaction decides under
+ * the lock whether an idle connection can satisfy it or a new connection is needed, and
+ * the user's callback fires from execute_transaction outside the lock. */
+void aws_http_connection_manager_acquire_connection(
+    struct aws_http_connection_manager *manager,
+    aws_http_connection_manager_on_connection_setup_fn *callback,
+    void *user_data) {
+
+    AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Acquire connection", (void *)manager);
+
+    /* NOTE(review): no NULL check - presumably aws_mem_calloc aborts on OOM in this
+     * codebase; confirm against the aws-c-common allocator contract. */
+    struct aws_http_connection_acquisition *request =
+        aws_mem_calloc(manager->allocator, 1, sizeof(struct aws_http_connection_acquisition));
+
+    request->allocator = manager->allocator;
+    request->callback = callback;
+    request->user_data = user_data;
+    request->manager = manager;
+
+    struct aws_connection_management_transaction work;
+    s_aws_connection_management_transaction_init(&work, manager);
+
+    aws_mutex_lock(&manager->lock);
+
+    /* It's a use after free crime, we don't want to handle */
+    AWS_FATAL_ASSERT(manager->state == AWS_HCMST_READY);
+
+    aws_linked_list_push_back(&manager->pending_acquisitions, &request->node);
+    ++manager->pending_acquisition_count;
+
+    s_aws_http_connection_manager_build_transaction(&work);
+
+    aws_mutex_unlock(&manager->lock);
+
+    s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+/* Only invoke with the manager's lock held.
+ * Puts a connection on the idle list, stamping the monotonic time at which it becomes
+ * eligible for culling. On clock failure the bookkeeping record is freed and AWS_OP_ERR
+ * is returned, in which case the caller releases the connection instead of idling it. */
+static int s_idle_connection(struct aws_http_connection_manager *manager, struct aws_http_connection *connection) {
+    struct aws_idle_connection *idle_record =
+        aws_mem_calloc(manager->allocator, 1, sizeof(struct aws_idle_connection));
+
+    idle_record->allocator = manager->allocator;
+    idle_record->connection = connection;
+
+    uint64_t now = 0;
+    if (manager->system_vtable->get_monotonic_time(&now)) {
+        aws_mem_release(idle_record->allocator, idle_record);
+        return AWS_OP_ERR;
+    }
+
+    idle_record->cull_timestamp =
+        now + aws_timestamp_convert(
+                  manager->max_connection_idle_in_milliseconds, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
+
+    aws_linked_list_push_back(&manager->idle_connections, &idle_record->node);
+    ++manager->idle_connection_count;
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Returns a vended connection to the pool. A still-usable connection goes back on the
+ * idle list with a fresh cull timestamp; an unusable one (or one that fails to idle) is
+ * released via the transaction. Returns AWS_OP_ERR only on vended-count underflow,
+ * i.e. more releases than acquisitions - a caller bug. */
+int aws_http_connection_manager_release_connection(
+    struct aws_http_connection_manager *manager,
+    struct aws_http_connection *connection) {
+
+    struct aws_connection_management_transaction work;
+    s_aws_connection_management_transaction_init(&work, manager);
+
+    int result = AWS_OP_ERR;
+    /* NOTE(review): is_connection_available is queried before taking the manager lock;
+     * presumably it only touches the connection's own state - confirm. */
+    bool should_release_connection = !manager->system_vtable->is_connection_available(connection);
+
+    AWS_LOGF_DEBUG(
+        AWS_LS_HTTP_CONNECTION_MANAGER,
+        "id=%p: User releasing connection (id=%p)",
+        (void *)manager,
+        (void *)connection);
+
+    aws_mutex_lock(&manager->lock);
+
+    /* We're probably hosed in this case, but let's not underflow */
+    if (manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION] == 0) {
+        AWS_LOGF_FATAL(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: Connection released when vended connection count is zero",
+            (void *)manager);
+        aws_raise_error(AWS_ERROR_HTTP_CONNECTION_MANAGER_VENDED_CONNECTION_UNDERFLOW);
+        goto release;
+    }
+
+    result = AWS_OP_SUCCESS;
+
+    s_connection_manager_internal_ref_decrease(manager, AWS_HCMCT_VENDED_CONNECTION, 1);
+
+    /* Try to idle the connection; if bookkeeping fails, fall back to releasing it. */
+    if (!should_release_connection) {
+        if (s_idle_connection(manager, connection)) {
+            should_release_connection = true;
+        }
+    }
+
+    s_aws_http_connection_manager_build_transaction(&work);
+    if (should_release_connection) {
+        work.connection_to_release = connection;
+    }
+
+release:
+
+    aws_mutex_unlock(&manager->lock);
+
+    s_aws_http_connection_manager_execute_transaction(&work);
+
+    return result;
+}
+
+/* HTTP/2 callback: the peer sent GOAWAY, so this connection must not be vended again.
+ * If it is sitting idle, pull it off the idle list and queue it for release. */
+static void s_aws_http_connection_manager_h2_on_goaway_received(
+    struct aws_http_connection *http2_connection,
+    uint32_t last_stream_id,
+    uint32_t http2_error_code,
+    struct aws_byte_cursor debug_data,
+    void *user_data) {
+    struct aws_http_connection_manager *manager = user_data;
+    /* We don't offer user the details, but we can still log it out for debugging */
+    AWS_LOGF_DEBUG(
+        AWS_LS_HTTP_CONNECTION_MANAGER,
+        "id=%p: HTTP/2 connection (id=%p) received GOAWAY with: last stream id - %u, error code - %u, debug data - "
+        "\"%.*s\"",
+        (void *)manager,
+        (void *)http2_connection,
+        last_stream_id,
+        http2_error_code,
+        (int)debug_data.len,
+        debug_data.ptr);
+
+    struct aws_connection_management_transaction work;
+    s_aws_connection_management_transaction_init(&work, manager);
+
+    aws_mutex_lock(&manager->lock);
+    /* Goaway received, remove the connection from idle and release it, if it's there. But, not decrease the
+     * open_connection_count as the shutdown callback will be invoked, we still need the manager to be alive */
+    const struct aws_linked_list_node *end = aws_linked_list_end(&manager->idle_connections);
+    for (struct aws_linked_list_node *node = aws_linked_list_begin(&manager->idle_connections); node != end;
+         node = aws_linked_list_next(node)) {
+        struct aws_idle_connection *current_idle_connection = AWS_CONTAINER_OF(node, struct aws_idle_connection, node);
+        if (current_idle_connection->connection == http2_connection) {
+            aws_linked_list_remove(node);
+            work.connection_to_release = http2_connection;
+            aws_mem_release(current_idle_connection->allocator, current_idle_connection);
+            --manager->idle_connection_count;
+            break;
+        }
+    }
+    s_aws_http_connection_manager_build_transaction(&work);
+
+    aws_mutex_unlock(&manager->lock);
+
+    s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+/* Only invoke with lock held */
+/* Transitions a new connection into service, or accounts for its failure:
+ *  - error_code == 0: idle the connection; if the manager is shutting down or idling
+ *    fails, mark it for immediate release instead.
+ *  - error_code != 0: the connection will never serve anyone, so fail every pending
+ *    acquisition beyond what the remaining pending connections/settings could still
+ *    satisfy, then queue the connection (if any) for release - it was never idled, so
+ *    nothing else will release it. */
+static void s_cm_on_connection_ready_or_failed(
+    struct aws_http_connection_manager *manager,
+    int error_code,
+    struct aws_http_connection *connection,
+    struct aws_connection_management_transaction *work) {
+
+    bool is_shutting_down = manager->state == AWS_HCMST_SHUTTING_DOWN;
+
+    if (!error_code) {
+        if (is_shutting_down || s_idle_connection(manager, connection)) {
+            /*
+             * release it immediately
+             */
+            AWS_LOGF_DEBUG(
+                AWS_LS_HTTP_CONNECTION_MANAGER,
+                "id=%p: New connection (id=%p) releasing immediately",
+                (void *)manager,
+                (void *)connection);
+            work->connection_to_release = connection;
+        }
+    } else {
+        /* fail acquisition as one connection cannot be used any more */
+        while (manager->pending_acquisition_count >
+               manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] + manager->pending_settings_count) {
+            AWS_LOGF_DEBUG(
+                AWS_LS_HTTP_CONNECTION_MANAGER,
+                "id=%p: Failing excess connection acquisition with error code %d",
+                (void *)manager,
+                (int)error_code);
+            s_aws_http_connection_manager_move_front_acquisition(manager, NULL, error_code, &work->completions);
+        }
+        /* Since the connection never being idle, we need to release the connection here. */
+        if (connection) {
+            work->connection_to_release = connection;
+        }
+    }
+}
+
+/* HTTP/2 callback: the peer acknowledged our initial SETTINGS. Only at this point is an
+ * h2 connection considered ready to vend, so the ready/failed accounting that HTTP/1
+ * does in on_connection_setup happens here instead (paired with the
+ * pending_settings_count increment made there). */
+static void s_aws_http_connection_manager_h2_on_initial_settings_completed(
+    struct aws_http_connection *http2_connection,
+    int error_code,
+    void *user_data) {
+    struct aws_http_connection_manager *manager = user_data;
+    /* The other side acknowledge about the settings which also means we received the settings from other side at this
+     * point, because the settings should be the first frame to be sent */
+
+    struct aws_connection_management_transaction work;
+    s_aws_connection_management_transaction_init(&work, manager);
+
+    AWS_LOGF_DEBUG(
+        AWS_LS_HTTP_CONNECTION_MANAGER,
+        "id=%p: HTTP/2 connection (id=%p) completed initial settings",
+        (void *)manager,
+        (void *)http2_connection);
+
+    aws_mutex_lock(&manager->lock);
+
+    AWS_FATAL_ASSERT(manager->pending_settings_count > 0);
+    --manager->pending_settings_count;
+    s_cm_on_connection_ready_or_failed(manager, error_code, http2_connection, &work);
+
+    s_aws_http_connection_manager_build_transaction(&work);
+
+    aws_mutex_unlock(&manager->lock);
+
+    s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+/* Callback from the http layer when an async connection attempt resolves. Always pairs
+ * with a prior AWS_HCMCT_PENDING_CONNECTIONS increment. HTTP/1 connections (and
+ * failures) are processed immediately; HTTP/2 connections are held back until their
+ * initial settings complete. */
+static void s_aws_http_connection_manager_on_connection_setup(
+    struct aws_http_connection *connection,
+    int error_code,
+    void *user_data) {
+    struct aws_http_connection_manager *manager = user_data;
+
+    struct aws_connection_management_transaction work;
+    s_aws_connection_management_transaction_init(&work, manager);
+
+    if (connection != NULL) {
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: Received new connection (id=%p) from http layer",
+            (void *)manager,
+            (void *)connection);
+    } else {
+        AWS_LOGF_WARN(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: Failed to obtain new connection from http layer, error %d(%s)",
+            (void *)manager,
+            error_code,
+            aws_error_str(error_code));
+    }
+
+    aws_mutex_lock(&manager->lock);
+
+    AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] > 0);
+    s_connection_manager_internal_ref_decrease(manager, AWS_HCMCT_PENDING_CONNECTIONS, 1);
+    if (!error_code) {
+        /* Shutdown will not be invoked if setup completed with error */
+        s_connection_manager_internal_ref_increase(manager, AWS_HCMCT_OPEN_CONNECTION, 1);
+    }
+
+    if (connection != NULL && manager->system_vtable->connection_get_version(connection) == AWS_HTTP_VERSION_2) {
+        /* If the manager is shutting down, we will still wait for the settings, since we don't have map for connections
+         */
+        ++manager->pending_settings_count;
+        /* For http/2 connection, we vend the connection after the initial settings completed for the user to make
+         * sure the connection is really ready to use. So, we can revert the counting and act like nothing happens
+         * here and wait for the on_initial_settings_completed, which will ALWAYS be invoked before shutdown. BUT,
+         * we increase the open_connection_count, as the shutdown will be invoked no matter what happens. */
+        AWS_LOGF_TRACE(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "id=%p: New HTTP/2 connection (id=%p) set up, waiting for initial settings to complete",
+            (void *)manager,
+            (void *)connection);
+    } else {
+        /* If there is no connection, error code cannot be zero */
+        AWS_ASSERT(connection || error_code);
+        s_cm_on_connection_ready_or_failed(manager, error_code, connection, &work);
+    }
+
+    s_aws_http_connection_manager_build_transaction(&work);
+
+    aws_mutex_unlock(&manager->lock);
+
+    s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+/* Callback from the http layer when a connection finishes shutting down. Drops the
+ * AWS_HCMCT_OPEN_CONNECTION internal ref taken at setup and, if the connection was
+ * sitting idle, removes it from the idle list so the transaction can release it. */
+static void s_aws_http_connection_manager_on_connection_shutdown(
+    struct aws_http_connection *connection,
+    int error_code,
+    void *user_data) {
+    (void)error_code;
+
+    struct aws_http_connection_manager *manager = user_data;
+
+    AWS_LOGF_DEBUG(
+        AWS_LS_HTTP_CONNECTION_MANAGER,
+        "id=%p: shutdown received for connection (id=%p)",
+        (void *)manager,
+        (void *)connection);
+
+    struct aws_connection_management_transaction work;
+    s_aws_connection_management_transaction_init(&work, manager);
+
+    aws_mutex_lock(&manager->lock);
+
+    AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_OPEN_CONNECTION] > 0);
+    s_connection_manager_internal_ref_decrease(manager, AWS_HCMCT_OPEN_CONNECTION, 1);
+
+    /*
+     * Find and, if found, remove it from idle connections
+     */
+    const struct aws_linked_list_node *end = aws_linked_list_end(&manager->idle_connections);
+    for (struct aws_linked_list_node *node = aws_linked_list_begin(&manager->idle_connections); node != end;
+         node = aws_linked_list_next(node)) {
+        struct aws_idle_connection *current_idle_connection = AWS_CONTAINER_OF(node, struct aws_idle_connection, node);
+        if (current_idle_connection->connection == connection) {
+            aws_linked_list_remove(node);
+            work.connection_to_release = connection;
+            aws_mem_release(current_idle_connection->allocator, current_idle_connection);
+            --manager->idle_connection_count;
+            break;
+        }
+    }
+
+    s_aws_http_connection_manager_build_transaction(&work);
+
+    aws_mutex_unlock(&manager->lock);
+
+    s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+/* Sweeps the idle list and queues for release every connection whose cull timestamp has
+ * passed. The list front holds the closest cull time (see s_schedule_connection_culling),
+ * so the sweep stops at the first still-fresh connection. Invoked from s_cull_task on
+ * the cull event loop; does nothing while shutting down or when culling is disabled. */
+static void s_cull_idle_connections(struct aws_http_connection_manager *manager) {
+    AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: culling idle connections", (void *)manager);
+
+    if (manager == NULL || manager->max_connection_idle_in_milliseconds == 0) {
+        return;
+    }
+
+    uint64_t now = 0;
+    if (manager->system_vtable->get_monotonic_time(&now)) {
+        return;
+    }
+
+    struct aws_connection_management_transaction work;
+    s_aws_connection_management_transaction_init(&work, manager);
+
+    aws_mutex_lock(&manager->lock);
+
+    /* Only if we're not shutting down */
+    if (manager->state == AWS_HCMST_READY) {
+        const struct aws_linked_list_node *end = aws_linked_list_end(&manager->idle_connections);
+        struct aws_linked_list_node *current_node = aws_linked_list_begin(&manager->idle_connections);
+        while (current_node != end) {
+            struct aws_linked_list_node *node = current_node;
+            struct aws_idle_connection *current_idle_connection =
+                AWS_CONTAINER_OF(node, struct aws_idle_connection, node);
+            if (current_idle_connection->cull_timestamp > now) {
+                break;
+            }
+
+            /* Advance before unlinking so iteration stays valid. */
+            current_node = aws_linked_list_next(current_node);
+            aws_linked_list_remove(node);
+            aws_linked_list_push_back(&work.connections_to_release, node);
+            --manager->idle_connection_count;
+
+            AWS_LOGF_DEBUG(
+                AWS_LS_HTTP_CONNECTION_MANAGER,
+                "id=%p: culling idle connection (%p)",
+                (void *)manager,
+                (void *)current_idle_connection->connection);
+        }
+    }
+
+    s_aws_http_connection_manager_get_snapshot(manager, &work.snapshot);
+
+    aws_mutex_unlock(&manager->lock);
+
+    s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+/* Periodic event-loop task: sweep idle connections past their cull time, then arm the
+ * next run. Does nothing when the task was cancelled rather than run. */
+static void s_cull_task(struct aws_task *task, void *arg, enum aws_task_status status) {
+    (void)task;
+
+    struct aws_http_connection_manager *manager = arg;
+    if (status == AWS_TASK_STATUS_RUN_READY) {
+        s_cull_idle_connections(manager);
+        s_schedule_connection_culling(manager);
+    }
+}
+
+/* Copies a consistent snapshot of the manager's concurrency counters into out_metrics.
+ * The lock is taken despite the const-qualified manager parameter, hence the cast. */
+void aws_http_connection_manager_fetch_metrics(
+    const struct aws_http_connection_manager *manager,
+    struct aws_http_manager_metrics *out_metrics) {
+    AWS_PRECONDITION(manager);
+    AWS_PRECONDITION(out_metrics);
+
+    struct aws_mutex *lock = (struct aws_mutex *)(void *)&manager->lock;
+    AWS_FATAL_ASSERT(aws_mutex_lock(lock) == AWS_OP_SUCCESS);
+    out_metrics->leased_concurrency = manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION];
+    out_metrics->pending_concurrency_acquires = manager->pending_acquisition_count;
+    out_metrics->available_concurrency = manager->idle_connection_count;
+    AWS_FATAL_ASSERT(aws_mutex_unlock(lock) == AWS_OP_SUCCESS);
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/connection_monitor.c b/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
new file mode 100644
index 00000000000..2732325512c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
@@ -0,0 +1,235 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/connection_monitor.h>
+
+#include <aws/http/connection.h>
+#include <aws/http/statistics.h>
+#include <aws/io/channel.h>
+#include <aws/io/logging.h>
+#include <aws/io/statistics.h>
+
+#include <aws/common/clock.h>
+
+#include <inttypes.h>
+
+/* Statistics-handler callback: derives the channel's recent throughput from the
+ * gathered socket and HTTP channel statistics, and shuts the channel down if
+ * throughput stays below options.minimum_throughput_bytes_per_second for longer
+ * than options.allowable_throughput_failure_interval_seconds.
+ * `context` is the monitored aws_channel. */
+static void s_process_statistics(
+    struct aws_crt_statistics_handler *handler,
+    struct aws_crt_statistics_sample_interval *interval,
+    struct aws_array_list *stats_list,
+    void *context) {
+
+    (void)interval;
+
+    struct aws_statistics_handler_http_connection_monitor_impl *impl = handler->impl;
+    if (!aws_http_connection_monitoring_options_is_valid(&impl->options)) {
+        return;
+    }
+
+    uint64_t pending_read_interval_ms = 0;
+    uint64_t pending_write_interval_ms = 0;
+    uint64_t bytes_read = 0;
+    uint64_t bytes_written = 0;
+    uint32_t h1_current_outgoing_stream_id = 0;
+    uint32_t h1_current_incoming_stream_id = 0;
+
+    /*
+     * Pull out the data needed to perform the throughput calculation
+     */
+    size_t stats_count = aws_array_list_length(stats_list);
+    bool h2 = false;
+    bool h2_was_inactive = false;
+
+    for (size_t i = 0; i < stats_count; ++i) {
+        struct aws_crt_statistics_base *stats_base = NULL;
+        if (aws_array_list_get_at(stats_list, &stats_base, i)) {
+            continue;
+        }
+
+        switch (stats_base->category) {
+            case AWSCRT_STAT_CAT_SOCKET: {
+                struct aws_crt_statistics_socket *socket_stats = (struct aws_crt_statistics_socket *)stats_base;
+                bytes_read = socket_stats->bytes_read;
+                bytes_written = socket_stats->bytes_written;
+                break;
+            }
+
+            case AWSCRT_STAT_CAT_HTTP1_CHANNEL: {
+                AWS_ASSERT(!h2);
+                struct aws_crt_statistics_http1_channel *http1_stats =
+                    (struct aws_crt_statistics_http1_channel *)stats_base;
+                pending_read_interval_ms = http1_stats->pending_incoming_stream_ms;
+                pending_write_interval_ms = http1_stats->pending_outgoing_stream_ms;
+                h1_current_outgoing_stream_id = http1_stats->current_outgoing_stream_id;
+                h1_current_incoming_stream_id = http1_stats->current_incoming_stream_id;
+
+                break;
+            }
+
+            case AWSCRT_STAT_CAT_HTTP2_CHANNEL: {
+                struct aws_crt_statistics_http2_channel *h2_stats =
+                    (struct aws_crt_statistics_http2_channel *)stats_base;
+                pending_read_interval_ms = h2_stats->pending_incoming_stream_ms;
+                pending_write_interval_ms = h2_stats->pending_outgoing_stream_ms;
+                h2_was_inactive |= h2_stats->was_inactive;
+                h2 = true;
+                break;
+            }
+
+            default:
+                break;
+        }
+    }
+
+    /* Forward the raw stats to the user's observer, if one was configured. */
+    if (impl->options.statistics_observer_fn) {
+        impl->options.statistics_observer_fn(
+            (size_t)(uintptr_t)(context), stats_list, impl->options.statistics_observer_user_data);
+    }
+
+    /* context is the channel being monitored */
+    struct aws_channel *channel = context;
+
+    uint64_t bytes_per_second = 0;
+    uint64_t max_pending_io_interval_ms = 0;
+
+    /* Compute write throughput over the interval we were waiting to write,
+     * saturating at UINT64_MAX rather than overflowing. */
+    if (pending_write_interval_ms > 0) {
+        double fractional_bytes_written_per_second =
+            (double)bytes_written * (double)AWS_TIMESTAMP_MILLIS / (double)pending_write_interval_ms;
+        if (fractional_bytes_written_per_second >= (double)UINT64_MAX) {
+            bytes_per_second = UINT64_MAX;
+        } else {
+            bytes_per_second = (uint64_t)fractional_bytes_written_per_second;
+        }
+        max_pending_io_interval_ms = pending_write_interval_ms;
+    }
+
+    /* Add read throughput (saturating) and track the larger pending-IO interval. */
+    if (pending_read_interval_ms > 0) {
+        double fractional_bytes_read_per_second =
+            (double)bytes_read * (double)AWS_TIMESTAMP_MILLIS / (double)pending_read_interval_ms;
+        if (fractional_bytes_read_per_second >= (double)UINT64_MAX) {
+            bytes_per_second = UINT64_MAX;
+        } else {
+            bytes_per_second = aws_add_u64_saturating(bytes_per_second, (uint64_t)fractional_bytes_read_per_second);
+        }
+        if (pending_read_interval_ms > max_pending_io_interval_ms) {
+            max_pending_io_interval_ms = pending_read_interval_ms;
+        }
+    }
+
+    AWS_LOGF_DEBUG(
+        AWS_LS_IO_CHANNEL,
+        "id=%p: channel throughput - %" PRIu64 " bytes per second",
+        (void *)channel,
+        bytes_per_second);
+
+    /*
+     * Check throughput only if the connection has active stream and no gap between.
+     */
+    bool check_throughput = false;
+    if (h2) {
+        /* For HTTP/2, check throughput only if there always has any active stream on the connection */
+        check_throughput = !h2_was_inactive;
+    } else {
+        /* For HTTP/1, check throughput only if at least one stream exists and was observed in that role previously */
+        check_throughput =
+            (h1_current_incoming_stream_id != 0 && h1_current_incoming_stream_id == impl->last_incoming_stream_id) ||
+            (h1_current_outgoing_stream_id != 0 && h1_current_outgoing_stream_id == impl->last_outgoing_stream_id);
+
+        impl->last_outgoing_stream_id = h1_current_outgoing_stream_id;
+        impl->last_incoming_stream_id = h1_current_incoming_stream_id;
+    }
+    impl->last_measured_throughput = bytes_per_second;
+
+    /* Whenever throughput is not checked or is acceptable, the consecutive-failure
+     * accumulator resets to zero. */
+    if (!check_throughput) {
+        AWS_LOGF_TRACE(AWS_LS_IO_CHANNEL, "id=%p: channel throughput does not need to be checked", (void *)channel);
+        impl->throughput_failure_time_ms = 0;
+        return;
+    }
+
+    if (bytes_per_second >= impl->options.minimum_throughput_bytes_per_second) {
+        impl->throughput_failure_time_ms = 0;
+        return;
+    }
+
+    /* Below the minimum: accumulate consecutive failure time (saturating). */
+    impl->throughput_failure_time_ms =
+        aws_add_u64_saturating(impl->throughput_failure_time_ms, max_pending_io_interval_ms);
+
+    AWS_LOGF_INFO(
+        AWS_LS_IO_CHANNEL,
+        "id=%p: Channel low throughput warning. Currently %" PRIu64 " milliseconds of consecutive failure time",
+        (void *)channel,
+        impl->throughput_failure_time_ms);
+
+    uint64_t maximum_failure_time_ms = aws_timestamp_convert(
+        impl->options.allowable_throughput_failure_interval_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL);
+    if (impl->throughput_failure_time_ms <= maximum_failure_time_ms) {
+        return;
+    }
+
+    AWS_LOGF_INFO(
+        AWS_LS_IO_CHANNEL,
+        "id=%p: Channel low throughput threshold exceeded (< %" PRIu64
+        " bytes per second for more than %u seconds). Shutting down.",
+        (void *)channel,
+        impl->options.minimum_throughput_bytes_per_second,
+        impl->options.allowable_throughput_failure_interval_seconds);
+
+    aws_channel_shutdown(channel, AWS_ERROR_HTTP_CHANNEL_THROUGHPUT_FAILURE);
+}
+
+/* Vtable destroy: handler and its impl live in one aws_mem_acquire_many
+ * allocation, so a single release frees both. NULL is a no-op. */
+static void s_destroy(struct aws_crt_statistics_handler *handler) {
+    if (handler != NULL) {
+        aws_mem_release(handler->allocator, handler);
+    }
+}
+
+/* Report statistics once per second (returned value is in milliseconds). */
+static uint64_t s_get_report_interval_ms(struct aws_crt_statistics_handler *handler) {
+    (void)handler;
+
+    return 1000;
+}
+
+/* Vtable wiring for the connection-monitor statistics handler. */
+static struct aws_crt_statistics_handler_vtable s_http_connection_monitor_vtable = {
+    .process_statistics = s_process_statistics,
+    .destroy = s_destroy,
+    .get_report_interval_ms = s_get_report_interval_ms,
+};
+
+/* Create a connection-monitor statistics handler.
+ * The handler and its impl are carved from a single allocation (released
+ * together in s_destroy). *options is copied by value; note that validity is
+ * checked on every s_process_statistics call rather than here.
+ * Returns NULL on allocation failure. */
+struct aws_crt_statistics_handler *aws_crt_statistics_handler_new_http_connection_monitor(
+    struct aws_allocator *allocator,
+    struct aws_http_connection_monitoring_options *options) {
+    struct aws_crt_statistics_handler *handler = NULL;
+    struct aws_statistics_handler_http_connection_monitor_impl *impl = NULL;
+
+    if (!aws_mem_acquire_many(
+            allocator,
+            2,
+            &handler,
+            sizeof(struct aws_crt_statistics_handler),
+            &impl,
+            sizeof(struct aws_statistics_handler_http_connection_monitor_impl))) {
+        return NULL;
+    }
+
+    AWS_ZERO_STRUCT(*handler);
+    AWS_ZERO_STRUCT(*impl);
+    impl->options = *options;
+
+    handler->vtable = &s_http_connection_monitor_vtable;
+    handler->allocator = allocator;
+    handler->impl = impl;
+
+    return handler;
+}
+
+/* Monitoring options are usable only when both thresholds are non-zero. */
+bool aws_http_connection_monitoring_options_is_valid(const struct aws_http_connection_monitoring_options *options) {
+    if (options == NULL) {
+        return false;
+    }
+
+    return options->minimum_throughput_bytes_per_second > 0 &&
+           options->allowable_throughput_failure_interval_seconds > 0;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/h1_connection.c b/contrib/restricted/aws/aws-c-http/source/h1_connection.c
new file mode 100644
index 00000000000..3532bb80d94
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h1_connection.c
@@ -0,0 +1,2064 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/clock.h>
+#include <aws/common/math.h>
+#include <aws/common/mutex.h>
+#include <aws/common/string.h>
+#include <aws/http/private/h1_connection.h>
+#include <aws/http/private/h1_decoder.h>
+#include <aws/http/private/h1_stream.h>
+#include <aws/http/private/request_response_impl.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+enum {
+ DECODER_INITIAL_SCRATCH_SIZE = 256,
+};
+
+static int s_handler_process_read_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message);
+
+static int s_handler_process_write_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message);
+
+static int s_handler_increment_read_window(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ size_t size);
+
+static int s_handler_shutdown(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ enum aws_channel_direction dir,
+ int error_code,
+ bool free_scarce_resources_immediately);
+
+static size_t s_handler_initial_window_size(struct aws_channel_handler *handler);
+static size_t s_handler_message_overhead(struct aws_channel_handler *handler);
+static void s_handler_destroy(struct aws_channel_handler *handler);
+static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot);
+static struct aws_http_stream *s_make_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+static struct aws_http_stream *s_new_server_request_handler_stream(
+ const struct aws_http_request_handler_options *options);
+static int s_stream_send_response(struct aws_http_stream *stream, struct aws_http_message *response);
+static void s_connection_close(struct aws_http_connection *connection_base);
+static void s_connection_stop_new_request(struct aws_http_connection *connection_base);
+static bool s_connection_is_open(const struct aws_http_connection *connection_base);
+static bool s_connection_new_requests_allowed(const struct aws_http_connection *connection_base);
+static int s_decoder_on_request(
+ enum aws_http_method method_enum,
+ const struct aws_byte_cursor *method_str,
+ const struct aws_byte_cursor *uri,
+ void *user_data);
+static int s_decoder_on_response(int status_code, void *user_data);
+static int s_decoder_on_header(const struct aws_h1_decoded_header *header, void *user_data);
+static int s_decoder_on_body(const struct aws_byte_cursor *data, bool finished, void *user_data);
+static int s_decoder_on_done(void *user_data);
+static void s_reset_statistics(struct aws_channel_handler *handler);
+static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats);
+static void s_write_outgoing_stream(struct aws_h1_connection *connection, bool first_try);
+static int s_try_process_next_stream_read_message(struct aws_h1_connection *connection, bool *out_stop_processing);
+
+/* Connection vtable for HTTP/1. Entries that only make sense for HTTP/2
+ * (settings, ping, goaway) are deliberately NULL. */
+static struct aws_http_connection_vtable s_h1_connection_vtable = {
+    .channel_handler_vtable =
+        {
+            .process_read_message = s_handler_process_read_message,
+            .process_write_message = s_handler_process_write_message,
+            .increment_read_window = s_handler_increment_read_window,
+            .shutdown = s_handler_shutdown,
+            .initial_window_size = s_handler_initial_window_size,
+            .message_overhead = s_handler_message_overhead,
+            .destroy = s_handler_destroy,
+            .reset_statistics = s_reset_statistics,
+            .gather_statistics = s_gather_statistics,
+        },
+    .on_channel_handler_installed = s_handler_installed,
+    .make_request = s_make_request,
+    .new_server_request_handler_stream = s_new_server_request_handler_stream,
+    .stream_send_response = s_stream_send_response,
+    .close = s_connection_close,
+    .stop_new_requests = s_connection_stop_new_request,
+    .is_open = s_connection_is_open,
+    .new_requests_allowed = s_connection_new_requests_allowed,
+    .change_settings = NULL,
+    .send_ping = NULL,
+    .send_goaway = NULL,
+    .get_sent_goaway = NULL,
+    .get_received_goaway = NULL,
+    .get_local_settings = NULL,
+    .get_remote_settings = NULL,
+};
+
+/* Callbacks invoked by the HTTP/1 decoder as it parses incoming data. */
+static const struct aws_h1_decoder_vtable s_h1_decoder_vtable = {
+    .on_request = s_decoder_on_request,
+    .on_response = s_decoder_on_response,
+    .on_header = s_decoder_on_header,
+    .on_body = s_decoder_on_body,
+    .on_done = s_decoder_on_done,
+};
+
+/* Acquire the lock guarding connection->synced_data.
+ * A lock failure is a programming error; asserted in debug builds. */
+void aws_h1_connection_lock_synced_data(struct aws_h1_connection *connection) {
+    const int lock_result = aws_mutex_lock(&connection->synced_data.lock);
+    AWS_ASSERT(lock_result == AWS_OP_SUCCESS);
+    (void)lock_result;
+}
+
+/* Release the lock guarding connection->synced_data.
+ * An unlock failure is a programming error; asserted in debug builds. */
+void aws_h1_connection_unlock_synced_data(struct aws_h1_connection *connection) {
+    const int unlock_result = aws_mutex_unlock(&connection->synced_data.lock);
+    AWS_ASSERT(unlock_result == AWS_OP_SUCCESS);
+    (void)unlock_result;
+}
+
+/**
+ * Internal function for bringing connection to a stop.
+ * Invoked multiple times, including when:
+ * - Channel is shutting down in the read direction.
+ * - Channel is shutting down in the write direction.
+ * - An error occurs.
+ * - User wishes to close the connection (this is the only case where the function may run off-thread).
+ */
+static void s_stop(
+    struct aws_h1_connection *connection,
+    bool stop_reading,
+    bool stop_writing,
+    bool schedule_shutdown,
+    int error_code) {
+
+    AWS_ASSERT(stop_reading || stop_writing || schedule_shutdown); /* You are required to stop at least 1 thing */
+
+    if (stop_reading) {
+        /* thread_data may only be touched on the channel thread */
+        AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+        connection->thread_data.is_reading_stopped = true;
+    }
+
+    if (stop_writing) {
+        AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+        connection->thread_data.is_writing_stopped = true;
+    }
+    { /* BEGIN CRITICAL SECTION */
+        aws_h1_connection_lock_synced_data(connection);
+
+        /* Even if we're not scheduling shutdown just yet (ex: sent final request but waiting to read final response)
+         * we don't consider the connection "open" anymore so user can't create more streams */
+        connection->synced_data.is_open = false;
+        connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+
+        aws_h1_connection_unlock_synced_data(connection);
+    } /* END CRITICAL SECTION */
+    if (schedule_shutdown) {
+        AWS_LOGF_INFO(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Shutting down connection with error code %d (%s).",
+            (void *)&connection->base,
+            error_code,
+            aws_error_name(error_code));
+
+        /* This is the one action here that may run off-thread (see header comment). */
+        aws_channel_shutdown(connection->base.channel_slot->channel, error_code);
+    }
+}
+
+static void s_shutdown_due_to_error(struct aws_h1_connection *connection, int error_code) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (!error_code) {
+ error_code = AWS_ERROR_UNKNOWN;
+ }
+
+ /* Stop reading AND writing if an error occurs.
+ *
+ * It doesn't currently seem worth the complexity to distinguish between read errors and write errors.
+ * The only scenarios that would benefit from this are pipelining scenarios (ex: A server
+ * could continue sending a response to request A if there was an error reading request B).
+ * But pipelining in HTTP/1.1 is known to be fragile with regards to errors, so let's just keep it simple.
+ */
+ s_stop(connection, true /*stop_reading*/, true /*stop_writing*/, true /*schedule_shutdown*/, error_code);
+}
+
+/**
+ * Public function for closing connection.
+ * Per s_stop's contract this is the one stop-path that may run off-thread.
+ */
+static void s_connection_close(struct aws_http_connection *connection_base) {
+    struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base);
+
+    /* Don't stop reading/writing immediately, let that happen naturally during the channel shutdown process. */
+    s_stop(connection, false /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, AWS_ERROR_SUCCESS);
+}
+
+/* Disallow future stream creation on the connection.
+ * The first latched error code wins; do not overwrite an existing one. */
+static void s_connection_stop_new_request(struct aws_http_connection *connection_base) {
+    struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base);
+
+    aws_h1_connection_lock_synced_data(connection);
+    const bool no_error_latched = (connection->synced_data.new_stream_error_code == 0);
+    if (no_error_latched) {
+        connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+    }
+    aws_h1_connection_unlock_synced_data(connection);
+}
+
+/* Thread-safe query: snapshot the open flag under the lock. */
+static bool s_connection_is_open(const struct aws_http_connection *connection_base) {
+    struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base);
+
+    aws_h1_connection_lock_synced_data(connection);
+    const bool is_open = connection->synced_data.is_open;
+    aws_h1_connection_unlock_synced_data(connection);
+
+    return is_open;
+}
+
+static bool s_connection_new_requests_allowed(const struct aws_http_connection *connection_base) {
+ struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base);
+ int new_stream_error_code;
+ { /* BEGIN CRITICAL SECTION */
+ aws_h1_connection_lock_synced_data(connection);
+ new_stream_error_code = connection->synced_data.new_stream_error_code;
+ aws_h1_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ return new_stream_error_code == 0;
+}
+
+/* Vtable shim: forward the response to the HTTP/1 stream implementation. */
+static int s_stream_send_response(struct aws_http_stream *stream, struct aws_http_message *response) {
+    AWS_PRECONDITION(stream);
+    AWS_PRECONDITION(response);
+
+    return aws_h1_stream_send_response(AWS_CONTAINER_OF(stream, struct aws_h1_stream, base), response);
+}
+
+/* Calculate the desired window size for connection that has switched protocols and become a midchannel handler. */
+static size_t s_calculate_midchannel_desired_connection_window(struct aws_h1_connection *connection) {
+    AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+    AWS_ASSERT(connection->thread_data.has_switched_protocols);
+
+    /* With no downstream handler installed there is nowhere to forward data, so keep
+     * the window at 0. Otherwise the connection just dumbly forwards aws_io_messages,
+     * so mirror the downstream handler's read window. */
+    struct aws_channel_slot *slot = connection->base.channel_slot;
+    return slot->adj_right ? aws_channel_slot_downstream_read_window(slot) : 0;
+}
+
+/* Calculate the desired window size for a connection that is processing data for aws_http_streams. */
+static size_t s_calculate_stream_mode_desired_connection_window(struct aws_h1_connection *connection) {
+    AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+    AWS_ASSERT(!connection->thread_data.has_switched_protocols);
+
+    /* Without manual window management, keep the connection window effectively unlimited. */
+    if (!connection->base.stream_manual_window_management) {
+        return SIZE_MAX;
+    }
+
+    /* Connection window should match the available space in the read-buffer */
+    AWS_ASSERT(
+        connection->thread_data.read_buffer.pending_bytes <= connection->thread_data.read_buffer.capacity &&
+        "This isn't fatal, but our math is off");
+    const size_t desired_connection_window = aws_sub_size_saturating(
+        connection->thread_data.read_buffer.capacity, connection->thread_data.read_buffer.pending_bytes);
+
+    AWS_LOGF_TRACE(
+        AWS_LS_HTTP_CONNECTION,
+        "id=%p: Window stats: connection=%zu+%zu stream=%" PRIu64 " buffer=%zu/%zu",
+        (void *)&connection->base,
+        connection->thread_data.connection_window,
+        desired_connection_window - connection->thread_data.connection_window /*increment_size*/,
+        connection->thread_data.incoming_stream ? connection->thread_data.incoming_stream->thread_data.stream_window
+            : 0,
+        connection->thread_data.read_buffer.pending_bytes,
+        connection->thread_data.read_buffer.capacity);
+
+    return desired_connection_window;
+}
+
+/* Increment connection window, if necessary.
+ * Returns AWS_OP_SUCCESS, or AWS_OP_ERR if the channel window increment fails
+ * (caller is expected to shut the connection down in that case). */
+static int s_update_connection_window(struct aws_h1_connection *connection) {
+    AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+    /* Once reading has stopped, the window is never re-opened. */
+    if (connection->thread_data.is_reading_stopped) {
+        return AWS_OP_SUCCESS;
+    }
+
+    const size_t desired_size = connection->thread_data.has_switched_protocols
+                                    ? s_calculate_midchannel_desired_connection_window(connection)
+                                    : s_calculate_stream_mode_desired_connection_window(connection);
+
+    const size_t increment_size = aws_sub_size_saturating(desired_size, connection->thread_data.connection_window);
+    if (increment_size > 0) {
+        /* Update local `connection_window`. See comments at variable's declaration site
+         * on why we use this instead of the official `aws_channel_slot.window_size` */
+        connection->thread_data.connection_window += increment_size;
+        connection->thread_data.recent_window_increments =
+            aws_add_size_saturating(connection->thread_data.recent_window_increments, increment_size);
+        if (aws_channel_slot_increment_read_window(connection->base.channel_slot, increment_size)) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_CONNECTION,
+                "id=%p: Failed to increment read window, error %d (%s). Closing connection.",
+                (void *)&connection->base,
+                aws_last_error(),
+                aws_error_name(aws_last_error()));
+
+            return AWS_OP_ERR;
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Activate a client stream on its HTTP/1 connection: assign the stream ID,
+ * queue it on the connection's new-stream list, and schedule the connection's
+ * cross-thread work task if it isn't already scheduled.
+ * Idempotent: a stream that already has an ID returns success immediately.
+ * May be called from any thread. */
+int aws_h1_stream_activate(struct aws_http_stream *stream) {
+    struct aws_h1_stream *h1_stream = AWS_CONTAINER_OF(stream, struct aws_h1_stream, base);
+
+    struct aws_http_connection *base_connection = stream->owning_connection;
+    struct aws_h1_connection *connection = AWS_CONTAINER_OF(base_connection, struct aws_h1_connection, base);
+
+    bool should_schedule_task = false;
+
+    { /* BEGIN CRITICAL SECTION */
+        /* Note: We're touching both the connection's and stream's synced_data in this section,
+         * which is OK because an h1_connection and all its h1_streams share a single lock. */
+        aws_h1_connection_lock_synced_data(connection);
+
+        if (stream->id) {
+            /* stream has already been activated. */
+            aws_h1_connection_unlock_synced_data(connection);
+            return AWS_OP_SUCCESS;
+        }
+
+        if (connection->synced_data.new_stream_error_code) {
+            aws_h1_connection_unlock_synced_data(connection);
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_CONNECTION,
+                "id=%p: Failed to activate the stream id=%p, new streams are not allowed now. error %d (%s)",
+                (void *)&connection->base,
+                (void *)stream,
+                connection->synced_data.new_stream_error_code,
+                aws_error_name(connection->synced_data.new_stream_error_code));
+            return aws_raise_error(connection->synced_data.new_stream_error_code);
+        }
+
+        stream->id = aws_http_connection_get_next_stream_id(base_connection);
+        if (!stream->id) {
+            aws_h1_connection_unlock_synced_data(connection);
+            /* aws_http_connection_get_next_stream_id() raises its own error. */
+            return AWS_OP_ERR;
+        }
+
+        /* ID successfully assigned */
+        h1_stream->synced_data.api_state = AWS_H1_STREAM_API_STATE_ACTIVE;
+
+        aws_linked_list_push_back(&connection->synced_data.new_client_stream_list, &h1_stream->node);
+        if (!connection->synced_data.is_cross_thread_work_task_scheduled) {
+            connection->synced_data.is_cross_thread_work_task_scheduled = true;
+            should_schedule_task = true;
+        }
+
+        aws_h1_connection_unlock_synced_data(connection);
+    } /* END CRITICAL SECTION */
+
+    /* connection keeps activated stream alive until stream completes */
+    aws_atomic_fetch_add(&stream->refcount, 1);
+
+    if (should_schedule_task) {
+        AWS_LOGF_TRACE(
+            AWS_LS_HTTP_CONNECTION, "id=%p: Scheduling connection cross-thread work task.", (void *)base_connection);
+        aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task);
+    } else {
+        AWS_LOGF_TRACE(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Connection cross-thread work task was already scheduled",
+            (void *)base_connection);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Implements aws_http_connection_vtable.make_request for HTTP/1.
+ * Creates (but does not activate) a new client request stream.
+ * Returns the new stream, or NULL with a raised error if stream creation
+ * fails or the connection no longer accepts new streams.
+ * Fix: added `static` to match the forward declaration (internal linkage was
+ * already in effect, but the definition now states it), and zero-initialized
+ * the method/path cursors — the getters' return values are ignored, so a
+ * failed getter previously left the cursors uninitialized before they were
+ * passed to aws_http_str_to_method() and the log formatter. */
+static struct aws_http_stream *s_make_request(
+    struct aws_http_connection *client_connection,
+    const struct aws_http_make_request_options *options) {
+    struct aws_h1_stream *stream = aws_h1_stream_new_request(client_connection, options);
+    if (!stream) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Cannot create request stream, error %d (%s)",
+            (void *)client_connection,
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+
+        return NULL;
+    }
+
+    struct aws_h1_connection *connection = AWS_CONTAINER_OF(client_connection, struct aws_h1_connection, base);
+
+    /* Insert new stream into pending list, and schedule outgoing_stream_task if it's not already running. */
+    int new_stream_error_code;
+    { /* BEGIN CRITICAL SECTION */
+        aws_h1_connection_lock_synced_data(connection);
+        new_stream_error_code = connection->synced_data.new_stream_error_code;
+        aws_h1_connection_unlock_synced_data(connection);
+    } /* END CRITICAL SECTION */
+    if (new_stream_error_code) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Cannot create request stream, error %d (%s)",
+            (void *)client_connection,
+            new_stream_error_code,
+            aws_error_name(new_stream_error_code));
+
+        aws_raise_error(new_stream_error_code);
+        goto error;
+    }
+
+    /* Success! */
+    /* Zero-init: the getters' return values are ignored, so the cursors must
+     * stay valid (empty) even if a getter fails. */
+    struct aws_byte_cursor method = {0};
+    aws_http_message_get_request_method(options->request, &method);
+    stream->base.request_method = aws_http_str_to_method(method);
+    struct aws_byte_cursor path = {0};
+    aws_http_message_get_request_path(options->request, &path);
+    AWS_LOGF_DEBUG(
+        AWS_LS_HTTP_STREAM,
+        "id=%p: Created client request on connection=%p: " PRInSTR " " PRInSTR " " PRInSTR,
+        (void *)&stream->base,
+        (void *)client_connection,
+        AWS_BYTE_CURSOR_PRI(method),
+        AWS_BYTE_CURSOR_PRI(path),
+        AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(connection->base.http_version)));
+
+    return &stream->base;
+
+error:
+    /* Force destruction of the stream, avoiding ref counting */
+    stream->base.vtable->destroy(&stream->base);
+    return NULL;
+}
+
+/* Extract work items from synced_data, and perform the work on-thread. */
+static void s_cross_thread_work_task(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) {
+    (void)channel_task;
+    struct aws_h1_connection *connection = arg;
+
+    if (status != AWS_TASK_STATUS_RUN_READY) {
+        return;
+    }
+
+    AWS_LOGF_TRACE(
+        AWS_LS_HTTP_CONNECTION, "id=%p: Running connection cross-thread work task.", (void *)&connection->base);
+
+    /* BEGIN CRITICAL SECTION */
+    aws_h1_connection_lock_synced_data(connection);
+
+    /* Clear the scheduled flag first so a concurrent activate() can re-arm the task. */
+    connection->synced_data.is_cross_thread_work_task_scheduled = false;
+
+    bool has_new_client_streams = !aws_linked_list_empty(&connection->synced_data.new_client_stream_list);
+    aws_linked_list_move_all_back(
+        &connection->thread_data.stream_list, &connection->synced_data.new_client_stream_list);
+
+    aws_h1_connection_unlock_synced_data(connection);
+    /* END CRITICAL SECTION */
+
+    /* Kick off outgoing-stream task if necessary */
+    if (has_new_client_streams) {
+        aws_h1_connection_try_write_outgoing_stream(connection);
+    }
+}
+
+/* True when the stream was a client CONNECT request that received a 200 OK. */
+static bool s_aws_http_stream_was_successful_connect(struct aws_h1_stream *stream) {
+    const struct aws_http_stream *base = &stream->base;
+
+    /* Short-circuiting keeps the client_data NULL-check ahead of the status read. */
+    return base->request_method == AWS_HTTP_METHOD_CONNECT && base->client_data != NULL &&
+           base->client_data->response_status == AWS_HTTP_STATUS_CODE_200_OK;
+}
+
+/**
+ * Validate and perform a protocol switch on a connection. Protocol switching essentially turns the connection's
+ * handler into a dummy pass-through. It is valid to switch protocols to the same protocol resulting in a channel
+ * that has a "dead" http handler in the middle of the channel (which negotiated the CONNECT through the proxy) and
+ * a "live" handler on the end which takes the actual http requests. By doing this, we get the exact same
+ * behavior whether we're transitioning to http or any other protocol: once the CONNECT succeeds
+ * the first http handler is put in pass-through mode and a new protocol (which could be http) is tacked onto the end.
+ * Returns AWS_OP_SUCCESS, or raises AWS_ERROR_INVALID_STATE if other streams are still pending.
+ */
+static int s_aws_http1_switch_protocols(struct aws_h1_connection *connection) {
+    AWS_FATAL_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+    /* Switching protocols while there are multiple streams is too complex to deal with.
+     * Ensure stream_list has exactly this 1 stream in it. */
+    if (aws_linked_list_begin(&connection->thread_data.stream_list) !=
+        aws_linked_list_rbegin(&connection->thread_data.stream_list)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Cannot switch protocols while further streams are pending, closing connection.",
+            (void *)&connection->base);
+
+        return aws_raise_error(AWS_ERROR_INVALID_STATE);
+    }
+
+    AWS_LOGF_TRACE(
+        AWS_LS_HTTP_CONNECTION,
+        "id=%p: Connection has switched protocols, another channel handler must be installed to"
+        " deal with further data.",
+        (void *)&connection->base);
+
+    connection->thread_data.has_switched_protocols = true;
+    { /* BEGIN CRITICAL SECTION */
+        aws_h1_connection_lock_synced_data(connection);
+        /* From now on, attempts to create new streams fail with SWITCHED_PROTOCOLS. */
+        connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_SWITCHED_PROTOCOLS;
+        aws_h1_connection_unlock_synced_data(connection);
+    } /* END CRITICAL SECTION */
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Finish a stream on the channel thread: possibly switch protocols after a
+ * successful CONNECT, downgrade certain errors to success when the message
+ * exchange already finished, remove the stream from the connection, complete
+ * leftover chunks, invoke the user's on_complete callback, and finally release
+ * the connection's reference to the stream. */
+static void s_stream_complete(struct aws_h1_stream *stream, int error_code) {
+    struct aws_h1_connection *connection =
+        AWS_CONTAINER_OF(stream->base.owning_connection, struct aws_h1_connection, base);
+
+    /*
+     * If this is the end of a successful CONNECT request, mark ourselves as pass-through since the proxy layer
+     * will be tacking on a new http handler (and possibly a tls handler in-between).
+     */
+    if (error_code == AWS_ERROR_SUCCESS && s_aws_http_stream_was_successful_connect(stream)) {
+        if (s_aws_http1_switch_protocols(connection)) {
+            error_code = AWS_ERROR_HTTP_PROTOCOL_SWITCH_FAILURE;
+            s_shutdown_due_to_error(connection, error_code);
+        }
+    }
+
+    if (error_code != AWS_ERROR_SUCCESS) {
+        if (stream->base.client_data && stream->is_incoming_message_done) {
+            /* As a request that finished receiving the response, we ignore error and
+             * consider it finished successfully */
+            AWS_LOGF_DEBUG(
+                AWS_LS_HTTP_STREAM,
+                "id=%p: Ignoring error code %d (%s). The response has been fully received,"
+                "so the stream will complete successfully.",
+                (void *)&stream->base,
+                error_code,
+                aws_error_name(error_code));
+            error_code = AWS_ERROR_SUCCESS;
+        }
+        if (stream->base.server_data && stream->is_outgoing_message_done) {
+            /* As a server finished sending the response, but still failed with the request was not finished receiving.
+             * We ignore error and consider it finished successfully */
+            AWS_LOGF_DEBUG(
+                AWS_LS_HTTP_STREAM,
+                "id=%p: Ignoring error code %d (%s). The response has been fully sent,"
+                " so the stream will complete successfully",
+                (void *)&stream->base,
+                error_code,
+                aws_error_name(error_code));
+            error_code = AWS_ERROR_SUCCESS;
+        }
+    }
+
+    /* Remove stream from list. */
+    aws_linked_list_remove(&stream->node);
+
+    /* Nice logging */
+    if (error_code) {
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: Stream completed with error code %d (%s).",
+            (void *)&stream->base,
+            error_code,
+            aws_error_name(error_code));
+
+    } else if (stream->base.client_data) {
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: Client request complete, response status: %d (%s).",
+            (void *)&stream->base,
+            stream->base.client_data->response_status,
+            aws_http_status_text(stream->base.client_data->response_status));
+    } else {
+        AWS_ASSERT(stream->base.server_data);
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: Server response to " PRInSTR " request complete.",
+            (void *)&stream->base,
+            AWS_BYTE_CURSOR_PRI(stream->base.server_data->request_method_str));
+    }
+
+    /* If connection must shut down, do it BEFORE invoking stream-complete callback.
+     * That way, if aws_http_connection_is_open() is called from stream-complete callback, it returns false. */
+    if (stream->is_final_stream) {
+        AWS_LOGF_TRACE(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Closing connection due to completion of final stream.",
+            (void *)&connection->base);
+
+        s_connection_close(&connection->base);
+    }
+
+    { /* BEGIN CRITICAL SECTION */
+        /* Note: We're touching the stream's synced_data here, which is OK
+         * because an h1_connection and all its h1_streams share a single lock. */
+        aws_h1_connection_lock_synced_data(connection);
+
+        /* Mark stream complete */
+        stream->synced_data.api_state = AWS_H1_STREAM_API_STATE_COMPLETE;
+
+        /* Move chunks out of synced data */
+        aws_linked_list_move_all_back(&stream->thread_data.pending_chunk_list, &stream->synced_data.pending_chunk_list);
+
+        aws_h1_connection_unlock_synced_data(connection);
+    } /* END CRITICAL SECTION */
+
+    /* Complete any leftover chunks */
+    while (!aws_linked_list_empty(&stream->thread_data.pending_chunk_list)) {
+        struct aws_linked_list_node *node = aws_linked_list_pop_front(&stream->thread_data.pending_chunk_list);
+        struct aws_h1_chunk *chunk = AWS_CONTAINER_OF(node, struct aws_h1_chunk, node);
+        aws_h1_chunk_complete_and_destroy(chunk, &stream->base, AWS_ERROR_HTTP_STREAM_HAS_COMPLETED);
+    }
+
+    /* Invoke callback and clean up stream. */
+    if (stream->base.on_complete) {
+        stream->base.on_complete(&stream->base, error_code, stream->base.user_data);
+    }
+
+    /* Release the reference taken when the stream was activated. */
+    aws_http_stream_release(&stream->base);
+}
+
+static void s_add_time_measurement_to_stats(uint64_t start_ns, uint64_t end_ns, uint64_t *output_ms) {
+ if (end_ns > start_ns) {
+ *output_ms += aws_timestamp_convert(end_ns - start_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL);
+ }
+}
+
+static void s_set_outgoing_stream_ptr(
+ struct aws_h1_connection *connection,
+ struct aws_h1_stream *next_outgoing_stream) {
+ struct aws_h1_stream *prev = connection->thread_data.outgoing_stream;
+
+ uint64_t now_ns = 0;
+ aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns);
+ if (prev == NULL && next_outgoing_stream != NULL) {
+ /* transition from nothing to write -> something to write */
+ connection->thread_data.outgoing_stream_timestamp_ns = now_ns;
+ } else if (prev != NULL && next_outgoing_stream == NULL) {
+ /* transition from something to write -> nothing to write */
+ s_add_time_measurement_to_stats(
+ connection->thread_data.outgoing_stream_timestamp_ns,
+ now_ns,
+ &connection->thread_data.stats.pending_outgoing_stream_ms);
+ }
+
+ connection->thread_data.outgoing_stream = next_outgoing_stream;
+}
+
+static void s_set_incoming_stream_ptr(
+ struct aws_h1_connection *connection,
+ struct aws_h1_stream *next_incoming_stream) {
+ struct aws_h1_stream *prev = connection->thread_data.incoming_stream;
+
+ uint64_t now_ns = 0;
+ aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns);
+ if (prev == NULL && next_incoming_stream != NULL) {
+ /* transition from nothing to read -> something to read */
+ connection->thread_data.incoming_stream_timestamp_ns = now_ns;
+ } else if (prev != NULL && next_incoming_stream == NULL) {
+ /* transition from something to read -> nothing to read */
+ s_add_time_measurement_to_stats(
+ connection->thread_data.incoming_stream_timestamp_ns,
+ now_ns,
+ &connection->thread_data.stats.pending_incoming_stream_ms);
+ }
+
+ connection->thread_data.incoming_stream = next_incoming_stream;
+}
+
+/**
+ * Ensure `incoming_stream` is pointing at the correct stream, and update state if it changes.
+ */
+static void s_client_update_incoming_stream_ptr(struct aws_h1_connection *connection) {
+ struct aws_linked_list *list = &connection->thread_data.stream_list;
+ struct aws_h1_stream *desired;
+ if (connection->thread_data.is_reading_stopped) {
+ desired = NULL;
+ } else if (aws_linked_list_empty(list)) {
+ desired = NULL;
+ } else {
+ desired = AWS_CONTAINER_OF(aws_linked_list_begin(list), struct aws_h1_stream, node);
+ }
+
+ if (connection->thread_data.incoming_stream == desired) {
+ return;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Current incoming stream is now %p.",
+ (void *)&connection->base,
+ desired ? (void *)&desired->base : NULL);
+
+ s_set_incoming_stream_ptr(connection, desired);
+}
+
/**
 * If necessary, update `outgoing_stream` so it is pointing at a stream
 * with data to send, or NULL if all streams are done sending data.
 *
 * May mark the current stream's outgoing message done, stop writing after a
 * final stream, complete streams that also finished receiving, start the
 * encoder on a newly selected stream, and (client-side) refresh the
 * incoming-stream pointer.
 *
 * Called from event-loop thread.
 * This function has lots of side effects.
 */
static struct aws_h1_stream *s_update_outgoing_stream_ptr(struct aws_h1_connection *connection) {
    struct aws_h1_stream *current = connection->thread_data.outgoing_stream;
    bool current_changed = false;
    int err;

    /* If current stream is done sending data... */
    if (current && !aws_h1_encoder_is_message_in_progress(&connection->thread_data.encoder)) {
        current->is_outgoing_message_done = true;

        /* RFC-7230 section 6.6: Tear-down.
         * If this was the final stream, don't allow any further streams to be sent */
        if (current->is_final_stream) {
            AWS_LOGF_TRACE(
                AWS_LS_HTTP_CONNECTION,
                "id=%p: Done sending final stream, no further streams will be sent.",
                (void *)&connection->base);

            s_stop(
                connection,
                false /*stop_reading*/,
                true /*stop_writing*/,
                false /*schedule_shutdown*/,
                AWS_ERROR_SUCCESS);
        }

        /* If it's also done receiving data, then it's complete! */
        if (current->is_incoming_message_done) {
            /* Only 1st stream in list could finish receiving before it finished sending */
            AWS_ASSERT(&current->node == aws_linked_list_begin(&connection->thread_data.stream_list));

            /* This removes stream from list */
            s_stream_complete(current, AWS_ERROR_SUCCESS);
        }

        current = NULL;
        current_changed = true;
    }

    /* If current stream is NULL, look for more work. */
    if (!current && !connection->thread_data.is_writing_stopped) {

        /* Look for next stream we can work on. */
        for (struct aws_linked_list_node *node = aws_linked_list_begin(&connection->thread_data.stream_list);
             node != aws_linked_list_end(&connection->thread_data.stream_list);
             node = aws_linked_list_next(node)) {

            struct aws_h1_stream *stream = AWS_CONTAINER_OF(node, struct aws_h1_stream, node);

            /* If we already sent this stream's data, keep looking... */
            if (stream->is_outgoing_message_done) {
                continue;
            }

            /* STOP if we're a server, and this stream's response isn't ready to send.
             * It's not like we can skip this and start on the next stream because responses must be sent in order.
             * Don't need a check like this for clients because their streams always start with data to send. */
            if (connection->base.server_data && !stream->thread_data.has_outgoing_response) {
                break;
            }

            /* We found a stream to work on! */
            current = stream;
            current_changed = true;
            break;
        }
    }

    /* Update current incoming and outgoing streams. */
    if (current_changed) {
        AWS_LOGF_TRACE(
            AWS_LS_HTTP_CONNECTION,
            "id=%p: Current outgoing stream is now %p.",
            (void *)&connection->base,
            current ? (void *)&current->base : NULL);

        s_set_outgoing_stream_ptr(connection, current);

        if (current) {
            /* Start encoding the newly selected stream's message.
             * NOTE(review): failure is asserted away here — presumably the message
             * was validated when the stream was created; confirm upstream. */
            err = aws_h1_encoder_start_message(
                &connection->thread_data.encoder, &current->encoder_message, &current->base);
            (void)err;
            AWS_ASSERT(!err);
        }

        /* incoming_stream update is only for client */
        if (connection->base.client_data) {
            s_client_update_incoming_stream_ptr(connection);
        }
    }

    return current;
}
+
+/* Runs after an aws_io_message containing HTTP has completed (written to the network, or failed).
+ * This does NOT run after switching protocols, when we're dumbly forwarding aws_io_messages
+ * as a midchannel handler. */
+static void s_on_channel_write_complete(
+ struct aws_channel *channel,
+ struct aws_io_message *message,
+ int err_code,
+ void *user_data) {
+
+ (void)message;
+ struct aws_h1_connection *connection = user_data;
+ AWS_ASSERT(connection->thread_data.is_outgoing_stream_task_active);
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (err_code) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Message did not write to network, error %d (%s)",
+ (void *)&connection->base,
+ err_code,
+ aws_error_name(err_code));
+
+ s_shutdown_due_to_error(connection, err_code);
+ return;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Message finished writing to network. Rescheduling outgoing stream task.",
+ (void *)&connection->base);
+
+ /* To avoid wasting memory, we only want ONE of our written aws_io_messages in the channel at a time.
+ * Therefore, we wait until it's written to the network before trying to send another
+ * by running the outgoing-stream-task again.
+ *
+ * We also want to share the network with other channels.
+ * Therefore, when the write completes, we SCHEDULE the outgoing-stream-task
+ * to run again instead of calling the function directly.
+ * This way, if the message completes synchronously,
+ * we're not hogging the network by writing message after message in a tight loop */
+ aws_channel_schedule_task_now(channel, &connection->outgoing_stream_task);
+}
+
+static void s_outgoing_stream_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ struct aws_h1_connection *connection = arg;
+ AWS_ASSERT(connection->thread_data.is_outgoing_stream_task_active);
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ s_write_outgoing_stream(connection, false /*first_try*/);
+}
+
+void aws_h1_connection_try_write_outgoing_stream(struct aws_h1_connection *connection) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (connection->thread_data.is_outgoing_stream_task_active) {
+ /* Task is already active */
+ return;
+ }
+
+ connection->thread_data.is_outgoing_stream_task_active = true;
+ s_write_outgoing_stream(connection, true /*first_try*/);
+}
+
/* Do the actual work of the outgoing-stream-task:
 * pick a stream with data to send, encode as much as fits into one
 * aws_io_message, and send it down the channel. Deactivates the task when
 * there is nothing to send; any failure shuts the connection down. */
static void s_write_outgoing_stream(struct aws_h1_connection *connection, bool first_try) {
    AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
    AWS_PRECONDITION(connection->thread_data.is_outgoing_stream_task_active);

    /* Just stop if we're no longer writing stream data */
    if (connection->thread_data.is_writing_stopped || connection->thread_data.has_switched_protocols) {
        return;
    }

    /* Determine whether we have data available to send, and end task immediately if there's not.
     * The outgoing stream task will be kicked off again when user adds more data (new stream, new chunk, etc) */
    struct aws_h1_stream *outgoing_stream = s_update_outgoing_stream_ptr(connection);
    bool waiting_for_chunks = aws_h1_encoder_is_waiting_for_chunks(&connection->thread_data.encoder);
    if (!outgoing_stream || waiting_for_chunks) {
        if (!first_try) {
            AWS_LOGF_TRACE(
                AWS_LS_HTTP_CONNECTION,
                "id=%p: Outgoing stream task stopped. outgoing_stream=%p waiting_for_chunks:%d",
                (void *)&connection->base,
                outgoing_stream ? (void *)&outgoing_stream->base : NULL,
                waiting_for_chunks);
        }
        /* Deactivate so aws_h1_connection_try_write_outgoing_stream() can restart us later */
        connection->thread_data.is_outgoing_stream_task_active = false;
        return;
    }

    if (first_try) {
        AWS_LOGF_TRACE(AWS_LS_HTTP_CONNECTION, "id=%p: Outgoing stream task has begun.", (void *)&connection->base);
    }

    struct aws_io_message *msg = aws_channel_slot_acquire_max_message_for_write(connection->base.channel_slot);
    if (!msg) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_CONNECTION,
            "id=%p: Failed to acquire message from pool, error %d (%s). Closing connection.",
            (void *)&connection->base,
            aws_last_error(),
            aws_error_name(aws_last_error()));
        goto error;
    }

    /* Set up callback so we can send another message when this one completes */
    msg->on_completion = s_on_channel_write_complete;
    msg->user_data = connection;

    /*
     * Fill message data from the outgoing stream.
     * Note that we might be resuming work on a stream from a previous run of this task.
     */
    if (AWS_OP_SUCCESS != aws_h1_encoder_process(&connection->thread_data.encoder, &msg->message_data)) {
        /* Error sending data, abandon ship */
        goto error;
    }

    if (msg->message_data.len > 0) {
        AWS_LOGF_TRACE(
            AWS_LS_HTTP_CONNECTION,
            "id=%p: Outgoing stream task is sending message of size %zu.",
            (void *)&connection->base,
            msg->message_data.len);

        /* On success, ownership of msg passes to the channel */
        if (aws_channel_slot_send_message(connection->base.channel_slot, msg, AWS_CHANNEL_DIR_WRITE)) {
            AWS_LOGF_ERROR(
                AWS_LS_HTTP_CONNECTION,
                "id=%p: Failed to send message in write direction, error %d (%s). Closing connection.",
                (void *)&connection->base,
                aws_last_error(),
                aws_error_name(aws_last_error()));

            goto error;
        }

    } else {
        /* If message is empty, warn that no work is being done
         * and reschedule the task to try again next tick.
         * It's likely that body isn't ready, so body streaming function has no data to write yet.
         * If this scenario turns out to be common we should implement a "pause" feature. */
        AWS_LOGF_WARN(
            AWS_LS_HTTP_CONNECTION,
            "id=%p: Current outgoing stream %p sent no data, will try again next tick.",
            (void *)&connection->base,
            outgoing_stream ? (void *)&outgoing_stream->base : NULL);

        aws_mem_release(msg->allocator, msg);

        aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->outgoing_stream_task);
    }

    return;
error:
    /* msg is NULL here only when acquisition itself failed */
    if (msg) {
        aws_mem_release(msg->allocator, msg);
    }
    s_shutdown_due_to_error(connection, aws_last_error());
}
+
/* Decoder callback: the request-line (method + URI) of an incoming request has
 * been parsed. Server-only. Copies both strings into stream-owned storage,
 * since the decoder's cursors point into transient channel buffers. */
static int s_decoder_on_request(
    enum aws_http_method method_enum,
    const struct aws_byte_cursor *method_str,
    const struct aws_byte_cursor *uri,
    void *user_data) {

    struct aws_h1_connection *connection = user_data;
    struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream;

    AWS_FATAL_ASSERT(connection->thread_data.incoming_stream->base.server_data); /* Request but I'm a client?!?!? */

    /* This callback fires at most once per stream */
    AWS_ASSERT(incoming_stream->base.server_data->request_method_str.len == 0);
    AWS_ASSERT(incoming_stream->base.server_data->request_path.len == 0);

    AWS_LOGF_TRACE(
        AWS_LS_HTTP_STREAM,
        "id=%p: Incoming request: method=" PRInSTR " uri=" PRInSTR,
        (void *)&incoming_stream->base,
        AWS_BYTE_CURSOR_PRI(*method_str),
        AWS_BYTE_CURSOR_PRI(*uri));

    /* Copy strings to internal buffer */
    struct aws_byte_buf *storage_buf = &incoming_stream->incoming_storage_buf;
    AWS_ASSERT(storage_buf->capacity == 0);

    /* Checked add guards against size_t overflow of method-len + uri-len */
    size_t storage_size = 0;
    int err = aws_add_size_checked(uri->len, method_str->len, &storage_size);
    if (err) {
        goto error;
    }

    err = aws_byte_buf_init(storage_buf, incoming_stream->base.alloc, storage_size);
    if (err) {
        goto error;
    }

    /* Layout of storage_buf: [method][uri].
     * request_path is made from the whole buffer, then advanced past the
     * method portion (storage_buf->len - uri->len == method_str->len). */
    aws_byte_buf_write_from_whole_cursor(storage_buf, *method_str);
    incoming_stream->base.server_data->request_method_str = aws_byte_cursor_from_buf(storage_buf);

    aws_byte_buf_write_from_whole_cursor(storage_buf, *uri);
    incoming_stream->base.server_data->request_path = aws_byte_cursor_from_buf(storage_buf);
    aws_byte_cursor_advance(&incoming_stream->base.server_data->request_path, storage_buf->len - uri->len);
    incoming_stream->base.request_method = method_enum;

    /* No user callbacks, so we're not checking for shutdown */
    return AWS_OP_SUCCESS;

error:
    AWS_LOGF_ERROR(
        AWS_LS_HTTP_CONNECTION,
        "id=%p: Failed to process new incoming request, error %d (%s).",
        (void *)&connection->base,
        aws_last_error(),
        aws_error_name(aws_last_error()));

    return AWS_OP_ERR;
}
+
+static int s_decoder_on_response(int status_code, void *user_data) {
+ struct aws_h1_connection *connection = user_data;
+
+ AWS_FATAL_ASSERT(connection->thread_data.incoming_stream->base.client_data); /* Response but I'm a server?!?!? */
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming response status: %d (%s).",
+ (void *)&connection->thread_data.incoming_stream->base,
+ status_code,
+ aws_http_status_text(status_code));
+
+ connection->thread_data.incoming_stream->base.client_data->response_status = status_code;
+
+ /* No user callbacks, so we're not checking for shutdown */
+ return AWS_OP_SUCCESS;
+}
+
/* Decoder callback: one incoming header has been parsed.
 * Handles "Connection: close" semantics, then delivers the header to the
 * user's on_incoming_headers callback (one header per invocation). */
static int s_decoder_on_header(const struct aws_h1_decoded_header *header, void *user_data) {
    struct aws_h1_connection *connection = user_data;
    struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream;

    AWS_LOGF_TRACE(
        AWS_LS_HTTP_STREAM,
        "id=%p: Incoming header: " PRInSTR ": " PRInSTR,
        (void *)&incoming_stream->base,
        AWS_BYTE_CURSOR_PRI(header->name_data),
        AWS_BYTE_CURSOR_PRI(header->value_data));

    enum aws_http_header_block header_block =
        aws_h1_decoder_get_header_block(connection->thread_data.incoming_stream_decoder);

    /* RFC-7230 section 6.1.
     * "Connection: close" header signals that a connection will not persist after the current request/response */
    if (header->name == AWS_HTTP_HEADER_CONNECTION) {
        /* Certain L7 proxies send a connection close header on a 200/OK response to a CONNECT request. This is nutty
         * behavior, but the obviously desired behavior on a 200 CONNECT response is to leave the connection open
         * for the tunneling. */
        bool ignore_connection_close =
            incoming_stream->base.request_method == AWS_HTTP_METHOD_CONNECT && incoming_stream->base.client_data &&
            incoming_stream->base.client_data->response_status == AWS_HTTP_STATUS_CODE_200_OK;

        if (!ignore_connection_close && aws_byte_cursor_eq_c_str_ignore_case(&header->value_data, "close")) {
            AWS_LOGF_TRACE(
                AWS_LS_HTTP_STREAM,
                "id=%p: Received 'Connection: close' header. This will be the final stream on this connection.",
                (void *)&incoming_stream->base);

            incoming_stream->is_final_stream = true;
            { /* BEGIN CRITICAL SECTION */
                /* Reject any streams created after this point */
                aws_h1_connection_lock_synced_data(connection);
                connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
                aws_h1_connection_unlock_synced_data(connection);
            } /* END CRITICAL SECTION */

            if (connection->base.client_data) {
                /**
                 * RFC-9112 section 9.6.
                 * A client that receives a "close" connection option MUST cease sending
                 * requests on that connection and close the connection after reading the
                 * response message containing the "close" connection option.
                 *
                 * Mark the stream's outgoing message as complete,
                 * so that we stop sending, and stop waiting for it to finish sending.
                 **/
                if (!incoming_stream->is_outgoing_message_done) {
                    AWS_LOGF_DEBUG(
                        AWS_LS_HTTP_STREAM,
                        "id=%p: Received 'Connection: close' header, no more request data will be sent.",
                        (void *)&incoming_stream->base);
                    incoming_stream->is_outgoing_message_done = true;
                }
                /* Stop writing right now.
                 * Shutdown will be scheduled after we finishing parsing the response */
                s_stop(
                    connection,
                    false /*stop_reading*/,
                    true /*stop_writing*/,
                    false /*schedule_shutdown*/,
                    AWS_ERROR_SUCCESS);
            }
        }
    }

    if (incoming_stream->base.on_incoming_headers) {
        struct aws_http_header deliver = {
            .name = header->name_data,
            .value = header->value_data,
        };

        int err = incoming_stream->base.on_incoming_headers(
            &incoming_stream->base, header_block, &deliver, 1, incoming_stream->base.user_data);

        if (err) {
            /* A non-zero return from the user callback aborts decoding */
            AWS_LOGF_ERROR(
                AWS_LS_HTTP_STREAM,
                "id=%p: Incoming header callback raised error %d (%s).",
                (void *)&incoming_stream->base,
                aws_last_error(),
                aws_error_name(aws_last_error()));

            return AWS_OP_ERR;
        }
    }

    return AWS_OP_SUCCESS;
}
+
/* Mark the end of an incoming header block (idempotent): records that the main
 * head is done, handles 101 Switching-Protocols for informational blocks, and
 * fires the user's header-block-done callback. */
static int s_mark_head_done(struct aws_h1_stream *incoming_stream) {
    /* Bail out if we've already done this */
    if (incoming_stream->is_incoming_head_done) {
        return AWS_OP_SUCCESS;
    }

    struct aws_h1_connection *connection =
        AWS_CONTAINER_OF(incoming_stream->base.owning_connection, struct aws_h1_connection, base);

    enum aws_http_header_block header_block =
        aws_h1_decoder_get_header_block(connection->thread_data.incoming_stream_decoder);

    if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) {
        AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Main header block done.", (void *)&incoming_stream->base);
        incoming_stream->is_incoming_head_done = true;

    } else if (header_block == AWS_HTTP_HEADER_BLOCK_INFORMATIONAL) {
        AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Informational header block done.", (void *)&incoming_stream->base);

        /* Only clients can receive informational headers.
         * Check whether we're switching protocols */
        if (incoming_stream->base.client_data->response_status == AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS) {
            if (s_aws_http1_switch_protocols(connection)) {
                return AWS_OP_ERR;
            }
        }
    }

    /* Invoke user cb */
    if (incoming_stream->base.on_incoming_header_block_done) {
        int err = incoming_stream->base.on_incoming_header_block_done(
            &incoming_stream->base, header_block, incoming_stream->base.user_data);
        if (err) {
            AWS_LOGF_ERROR(
                AWS_LS_HTTP_STREAM,
                "id=%p: Incoming-header-block-done callback raised error %d (%s).",
                (void *)&incoming_stream->base,
                aws_last_error(),
                aws_error_name(aws_last_error()));

            return AWS_OP_ERR;
        }
    }

    return AWS_OP_SUCCESS;
}
+
/* Decoder callback: a chunk of incoming body data has been parsed.
 * Shrinks the stream's flow-control window (manual mode) and delivers the
 * data to the user's on_incoming_body callback. */
static int s_decoder_on_body(const struct aws_byte_cursor *data, bool finished, void *user_data) {
    (void)finished;

    struct aws_h1_connection *connection = user_data;
    struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream;
    AWS_ASSERT(incoming_stream);

    /* Body implies the head finished; make sure that was marked */
    int err = s_mark_head_done(incoming_stream);
    if (err) {
        return AWS_OP_ERR;
    }

    /* No need to invoke callback for 0-length data */
    if (data->len == 0) {
        return AWS_OP_SUCCESS;
    }

    AWS_LOGF_TRACE(
        AWS_LS_HTTP_STREAM, "id=%p: Incoming body: %zu bytes received.", (void *)&incoming_stream->base, data->len);

    if (connection->base.stream_manual_window_management) {
        /* Let stream window shrink by amount of body data received */
        if (data->len > incoming_stream->thread_data.stream_window) {
            /* This error shouldn't be possible, but it's all complicated, so do runtime check to be safe. */
            AWS_LOGF_ERROR(
                AWS_LS_HTTP_STREAM,
                "id=%p: Internal error. Data exceeds HTTP-stream's window.",
                (void *)&incoming_stream->base);
            return aws_raise_error(AWS_ERROR_INVALID_STATE);
        }
        incoming_stream->thread_data.stream_window -= data->len;

        if (incoming_stream->thread_data.stream_window == 0) {
            AWS_LOGF_DEBUG(
                AWS_LS_HTTP_STREAM,
                "id=%p: Flow-control window has reached 0. No more data can be received until window is updated.",
                (void *)&incoming_stream->base);
        }
    }

    if (incoming_stream->base.on_incoming_body) {
        err = incoming_stream->base.on_incoming_body(&incoming_stream->base, data, incoming_stream->base.user_data);
        if (err) {
            /* A non-zero return from the user callback aborts decoding */
            AWS_LOGF_ERROR(
                AWS_LS_HTTP_STREAM,
                "id=%p: Incoming body callback raised error %d (%s).",
                (void *)&incoming_stream->base,
                aws_last_error(),
                aws_error_name(aws_last_error()));

            return AWS_OP_ERR;
        }
    }

    return AWS_OP_SUCCESS;
}
+
/* Decoder callback: an entire incoming message has finished decoding.
 * Informational responses keep the stream waiting; otherwise this marks the
 * incoming side done, possibly stops reading (final stream), fires server
 * callbacks, and completes the stream if sending has also finished. */
static int s_decoder_on_done(void *user_data) {
    struct aws_h1_connection *connection = user_data;
    struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream;
    AWS_ASSERT(incoming_stream);

    /* Ensure head was marked done */
    int err = s_mark_head_done(incoming_stream);
    if (err) {
        return AWS_OP_ERR;
    }
    /* If it is a informational response, we stop here, keep waiting for new response */
    enum aws_http_header_block header_block =
        aws_h1_decoder_get_header_block(connection->thread_data.incoming_stream_decoder);
    if (header_block == AWS_HTTP_HEADER_BLOCK_INFORMATIONAL) {
        return AWS_OP_SUCCESS;
    }

    /* Otherwise the incoming stream is finished decoding and we will update it if needed */
    incoming_stream->is_incoming_message_done = true;

    /* RFC-7230 section 6.6
     * After reading the final message, the connection must not read any more */
    if (incoming_stream->is_final_stream) {
        AWS_LOGF_TRACE(
            AWS_LS_HTTP_CONNECTION,
            "id=%p: Done reading final stream, no further streams will be read.",
            (void *)&connection->base);

        s_stop(
            connection, true /*stop_reading*/, false /*stop_writing*/, false /*schedule_shutdown*/, AWS_ERROR_SUCCESS);
    }

    if (connection->base.server_data) {
        /* Server side */
        aws_http_on_incoming_request_done_fn *on_request_done = incoming_stream->base.server_data->on_request_done;
        if (on_request_done) {
            err = on_request_done(&incoming_stream->base, incoming_stream->base.user_data);
            if (err) {
                AWS_LOGF_ERROR(
                    AWS_LS_HTTP_STREAM,
                    "id=%p: Incoming request done callback raised error %d (%s).",
                    (void *)&incoming_stream->base,
                    aws_last_error(),
                    aws_error_name(aws_last_error()));
                return AWS_OP_ERR;
            }
        }
        /* Stream is complete only once BOTH directions are done */
        if (incoming_stream->is_outgoing_message_done) {
            AWS_ASSERT(&incoming_stream->node == aws_linked_list_begin(&connection->thread_data.stream_list));
            s_stream_complete(incoming_stream, AWS_ERROR_SUCCESS);
        }
        s_set_incoming_stream_ptr(connection, NULL);

    } else if (incoming_stream->is_outgoing_message_done) {
        /* Client side */
        AWS_ASSERT(&incoming_stream->node == aws_linked_list_begin(&connection->thread_data.stream_list));

        s_stream_complete(incoming_stream, AWS_ERROR_SUCCESS);

        s_client_update_incoming_stream_ptr(connection);
    }

    /* Report success even if user's on_complete() callback shuts down on the connection.
     * We don't want it to look like something went wrong while decoding.
     * The decode() function returns after each message completes,
     * and we won't call decode() again if the connection has been shut down */
    return AWS_OP_SUCCESS;
}
+
/* Common new() logic for server & client.
 * Allocates and initializes an aws_h1_connection: vtable wiring, refcount,
 * flow-control window configuration, encoder, tasks, lists, lock, and decoder.
 * Returns NULL on failure (resources unwound via the goto chain below). */
static struct aws_h1_connection *s_connection_new(
    struct aws_allocator *alloc,
    bool manual_window_management,
    size_t initial_window_size,
    const struct aws_http1_connection_options *http1_options,
    bool server) {

    struct aws_h1_connection *connection = aws_mem_calloc(alloc, 1, sizeof(struct aws_h1_connection));
    if (!connection) {
        goto error_connection_alloc;
    }

    connection->base.vtable = &s_h1_connection_vtable;
    connection->base.alloc = alloc;
    connection->base.channel_handler.vtable = &s_h1_connection_vtable.channel_handler_vtable;
    connection->base.channel_handler.alloc = alloc;
    connection->base.channel_handler.impl = connection;
    connection->base.http_version = AWS_HTTP_VERSION_1_1;
    connection->base.stream_manual_window_management = manual_window_management;

    /* Init the next stream id (server must use even ids, client odd [RFC 7540 5.1.1])*/
    connection->base.next_stream_id = server ? 2 : 1;

    /* 1 refcount for user */
    aws_atomic_init_int(&connection->base.refcount, 1);

    if (manual_window_management) {
        connection->initial_stream_window_size = initial_window_size;

        if (http1_options->read_buffer_capacity > 0) {
            connection->thread_data.read_buffer.capacity = http1_options->read_buffer_capacity;
        } else {
            /* User did not set capacity, choose something reasonable based on initial_window_size */
            /* NOTE: These values are currently guesses, we should test to find good values */
            const size_t clamp_min = aws_min_size(g_aws_channel_max_fragment_size * 4, /*256KB*/ 256 * 1024);
            const size_t clamp_max = /*1MB*/ 1 * 1024 * 1024;
            connection->thread_data.read_buffer.capacity =
                aws_max_size(clamp_min, aws_min_size(clamp_max, initial_window_size));
        }

        connection->thread_data.connection_window = connection->thread_data.read_buffer.capacity;
    } else {
        /* No backpressure, keep connection window at SIZE_MAX */
        connection->initial_stream_window_size = SIZE_MAX;
        connection->thread_data.read_buffer.capacity = SIZE_MAX;
        connection->thread_data.connection_window = SIZE_MAX;
    }

    aws_h1_encoder_init(&connection->thread_data.encoder, alloc);

    aws_channel_task_init(
        &connection->outgoing_stream_task, s_outgoing_stream_task, connection, "http1_connection_outgoing_stream");
    aws_channel_task_init(
        &connection->cross_thread_work_task,
        s_cross_thread_work_task,
        connection,
        "http1_connection_cross_thread_work");
    aws_linked_list_init(&connection->thread_data.stream_list);
    aws_linked_list_init(&connection->thread_data.read_buffer.messages);
    aws_crt_statistics_http1_channel_init(&connection->thread_data.stats);

    /* Single lock shared by the connection and all its streams */
    int err = aws_mutex_init(&connection->synced_data.lock);
    if (err) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_CONNECTION,
            "static: Failed to initialize mutex, error %d (%s).",
            aws_last_error(),
            aws_error_name(aws_last_error()));

        goto error_mutex;
    }

    aws_linked_list_init(&connection->synced_data.new_client_stream_list);
    connection->synced_data.is_open = true;

    /* Servers decode requests; clients decode responses */
    struct aws_h1_decoder_params options = {
        .alloc = alloc,
        .is_decoding_requests = server,
        .user_data = connection,
        .vtable = s_h1_decoder_vtable,
        .scratch_space_initial_size = DECODER_INITIAL_SCRATCH_SIZE,
    };
    connection->thread_data.incoming_stream_decoder = aws_h1_decoder_new(&options);
    if (!connection->thread_data.incoming_stream_decoder) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_CONNECTION,
            "static: Failed to create decoder, error %d (%s).",
            aws_last_error(),
            aws_error_name(aws_last_error()));

        goto error_decoder;
    }

    return connection;

/* Unwind in reverse order of acquisition */
error_decoder:
    aws_mutex_clean_up(&connection->synced_data.lock);
error_mutex:
    aws_mem_release(alloc, connection);
error_connection_alloc:
    return NULL;
}
+
+struct aws_http_connection *aws_http_connection_new_http1_1_server(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ size_t initial_window_size,
+ const struct aws_http1_connection_options *http1_options) {
+
+ struct aws_h1_connection *connection =
+ s_connection_new(allocator, manual_window_management, initial_window_size, http1_options, true /*is_server*/);
+ if (!connection) {
+ return NULL;
+ }
+
+ connection->base.server_data = &connection->base.client_or_server_data.server;
+
+ return &connection->base;
+}
+
+struct aws_http_connection *aws_http_connection_new_http1_1_client(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ size_t initial_window_size,
+ const struct aws_http1_connection_options *http1_options) {
+
+ struct aws_h1_connection *connection =
+ s_connection_new(allocator, manual_window_management, initial_window_size, http1_options, false /*is_server*/);
+ if (!connection) {
+ return NULL;
+ }
+
+ connection->base.client_data = &connection->base.client_or_server_data.client;
+
+ return &connection->base;
+}
+
+static void s_handler_destroy(struct aws_channel_handler *handler) {
+ struct aws_h1_connection *connection = handler->impl;
+
+ AWS_LOGF_TRACE(AWS_LS_HTTP_CONNECTION, "id=%p: Destroying connection.", (void *)&connection->base);
+
+ AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.stream_list));
+ AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.new_client_stream_list));
+
+ /* Clean up any buffered read messages. */
+ while (!aws_linked_list_empty(&connection->thread_data.read_buffer.messages)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.read_buffer.messages);
+ struct aws_io_message *msg = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle);
+ aws_mem_release(msg->allocator, msg);
+ }
+
+ aws_h1_decoder_destroy(connection->thread_data.incoming_stream_decoder);
+ aws_h1_encoder_clean_up(&connection->thread_data.encoder);
+ aws_mutex_clean_up(&connection->synced_data.lock);
+ aws_mem_release(connection->base.alloc, connection);
+}
+
+static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot) {
+ struct aws_h1_connection *connection = handler->impl;
+ connection->base.channel_slot = slot;
+
+ /* Acquire a hold on the channel to prevent its destruction until the user has
+ * given the go-ahead via aws_http_connection_release() */
+ aws_channel_acquire_hold(slot->channel);
+}
+
+/* Try to send the next queued aws_io_message to the downstream handler.
+ * This can only be called after the connection has switched protocols and become a midchannel handler.
+ * On success, *out_stop_processing is set true when the downstream read-window is 0
+ * and no more messages can be forwarded right now. Returns AWS_OP_ERR on failure
+ * (caller is expected to shut the connection down). */
+static int s_try_process_next_midchannel_read_message(struct aws_h1_connection *connection, bool *out_stop_processing) {
+    AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+    AWS_ASSERT(connection->thread_data.has_switched_protocols);
+    AWS_ASSERT(!connection->thread_data.is_reading_stopped);
+    AWS_ASSERT(!aws_linked_list_empty(&connection->thread_data.read_buffer.messages));
+
+    *out_stop_processing = false;
+    struct aws_io_message *sending_msg = NULL;
+
+    /* A midchannel handler needs someone to its right to receive the raw data. */
+    if (!connection->base.channel_slot->adj_right) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Connection has switched protocols, but no handler is installed to deal with this data.",
+            (void *)connection);
+
+        return aws_raise_error(AWS_ERROR_HTTP_SWITCHED_PROTOCOLS);
+    }
+
+    size_t downstream_window = aws_channel_slot_downstream_read_window(connection->base.channel_slot);
+    if (downstream_window == 0) {
+        AWS_LOGF_TRACE(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Downstream window is 0, cannot send switched-protocol message now.",
+            (void *)&connection->base);
+
+        *out_stop_processing = true;
+        return AWS_OP_SUCCESS;
+    }
+
+    struct aws_linked_list_node *queued_msg_node = aws_linked_list_front(&connection->thread_data.read_buffer.messages);
+    struct aws_io_message *queued_msg = AWS_CONTAINER_OF(queued_msg_node, struct aws_io_message, queueing_handle);
+
+    /* Note that copy_mark is used to mark the progress of partially sent messages. */
+    AWS_ASSERT(queued_msg->message_data.len > queued_msg->copy_mark);
+    size_t sending_bytes = aws_min_size(queued_msg->message_data.len - queued_msg->copy_mark, downstream_window);
+
+    /* NOTE(review): pending_bytes is decremented before the allocation below can
+     * fail; on the error path the message stays queued with a stale count.
+     * Appears benign because the caller shuts the connection down — confirm. */
+    AWS_ASSERT(connection->thread_data.read_buffer.pending_bytes >= sending_bytes);
+    connection->thread_data.read_buffer.pending_bytes -= sending_bytes;
+
+    /* If we can't send the whole entire queued_msg, copy its data into a new aws_io_message and send that. */
+    if (sending_bytes != queued_msg->message_data.len) {
+        sending_msg = aws_channel_acquire_message_from_pool(
+            connection->base.channel_slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, sending_bytes);
+        if (!sending_msg) {
+            goto error;
+        }
+
+        aws_byte_buf_write(
+            &sending_msg->message_data, queued_msg->message_data.buffer + queued_msg->copy_mark, sending_bytes);
+
+        queued_msg->copy_mark += sending_bytes;
+
+        AWS_LOGF_TRACE(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Sending %zu bytes switched-protocol message to downstream handler, %zu bytes remain.",
+            (void *)&connection->base,
+            sending_bytes,
+            queued_msg->message_data.len - queued_msg->copy_mark);
+
+        /* If the last of queued_msg has been copied, it can be deleted now. */
+        if (queued_msg->copy_mark == queued_msg->message_data.len) {
+            aws_linked_list_remove(queued_msg_node);
+            aws_mem_release(queued_msg->allocator, queued_msg);
+        }
+    } else {
+        /* Sending all of queued_msg along. */
+        AWS_LOGF_TRACE(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Sending full switched-protocol message of size %zu to downstream handler.",
+            (void *)&connection->base,
+            queued_msg->message_data.len);
+
+        aws_linked_list_remove(queued_msg_node);
+        sending_msg = queued_msg;
+    }
+
+    int err = aws_channel_slot_send_message(connection->base.channel_slot, sending_msg, AWS_CHANNEL_DIR_READ);
+    if (err) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Failed to send message in read direction, error %d (%s).",
+            (void *)&connection->base,
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+        goto error;
+    }
+
+    return AWS_OP_SUCCESS;
+
+error:
+    if (sending_msg) {
+        aws_mem_release(sending_msg->allocator, sending_msg);
+    }
+    return AWS_OP_ERR;
+}
+
+/* Creates the server-side request-handler stream for an incoming request.
+ * Only legal on the channel thread, and only while the incoming-request
+ * callback is running (can_create_request_handler_stream gates this). */
+static struct aws_http_stream *s_new_server_request_handler_stream(
+    const struct aws_http_request_handler_options *options) {
+
+    struct aws_h1_connection *connection = AWS_CONTAINER_OF(options->server_connection, struct aws_h1_connection, base);
+
+    if (!aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel) ||
+        !connection->thread_data.can_create_request_handler_stream) {
+
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: aws_http_stream_new_server_request_handler() can only be called during incoming request callback.",
+            (void *)&connection->base);
+
+        aws_raise_error(AWS_ERROR_INVALID_STATE);
+        return NULL;
+    }
+
+    struct aws_h1_stream *stream = aws_h1_stream_new_request_handler(options);
+    if (!stream) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Failed to create request handler stream, error %d (%s).",
+            (void *)&connection->base,
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+
+        return NULL;
+    }
+
+    /*
+     * Success!
+     * Everything beyond this point cannot fail
+     */
+
+    /* Prevent further streams from being created until it's ok to do so. */
+    connection->thread_data.can_create_request_handler_stream = false;
+
+    /* Stream is waiting for response. */
+    aws_linked_list_push_back(&connection->thread_data.stream_list, &stream->node);
+
+    /* Connection owns stream, and must outlive stream */
+    aws_http_connection_acquire(&connection->base);
+
+    AWS_LOGF_TRACE(
+        AWS_LS_HTTP_STREAM,
+        "id=%p: Created request handler stream on server connection=%p",
+        (void *)&stream->base,
+        (void *)&connection->base);
+
+    return &stream->base;
+}
+
+/* Invokes the on_incoming_request callback and returns new stream.
+ * The can_create_request_handler_stream flag is raised only for the duration
+ * of the callback, which is the window in which the user MUST create the
+ * request-handler stream. Returns NULL if the user created no stream. */
+static struct aws_h1_stream *s_server_invoke_on_incoming_request(struct aws_h1_connection *connection) {
+    AWS_PRECONDITION(connection->base.server_data);
+    AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+    AWS_PRECONDITION(!connection->thread_data.can_create_request_handler_stream);
+    AWS_PRECONDITION(!connection->thread_data.incoming_stream);
+
+    /**
+     * The user MUST create the new request-handler stream during the on-incoming-request callback.
+     */
+    connection->thread_data.can_create_request_handler_stream = true;
+
+    struct aws_http_stream *new_stream =
+        connection->base.server_data->on_incoming_request(&connection->base, connection->base.user_data);
+
+    connection->thread_data.can_create_request_handler_stream = false;
+
+    return new_stream ? AWS_CONTAINER_OF(new_stream, struct aws_h1_stream, base) : NULL;
+}
+
+/* Channel-handler vtable "process_read_message": buffers each incoming
+ * aws_io_message, debits the connection window, then kicks processing. */
+static int s_handler_process_read_message(
+    struct aws_channel_handler *handler,
+    struct aws_channel_slot *slot,
+    struct aws_io_message *message) {
+
+    (void)slot;
+    struct aws_h1_connection *connection = handler->impl;
+    const size_t message_size = message->message_data.len;
+
+    AWS_LOGF_TRACE(
+        AWS_LS_HTTP_CONNECTION, "id=%p: Incoming message of size %zu.", (void *)&connection->base, message_size);
+
+    /* Shrink connection window by amount of data received. See comments at variable's
+     * declaration site on why we use this instead of the official `aws_channel_slot.window_size`. */
+    if (message_size > connection->thread_data.connection_window) {
+        /* This error shouldn't be possible, but this is all complicated so check at runtime to be safe. */
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Internal error. Message exceeds connection's window.",
+            (void *)&connection->base);
+        return aws_raise_error(AWS_ERROR_INVALID_STATE);
+    }
+    connection->thread_data.connection_window -= message_size;
+
+    /* Push message into queue of buffered messages */
+    aws_linked_list_push_back(&connection->thread_data.read_buffer.messages, &message->queueing_handle);
+    connection->thread_data.read_buffer.pending_bytes += message_size;
+
+    /* Try to process messages in queue */
+    aws_h1_connection_try_process_read_messages(connection);
+    return AWS_OP_SUCCESS;
+}
+
+/* Drains the queue of buffered read messages, dispatching each either to the
+ * midchannel path (after a protocol switch) or to the normal HTTP-stream
+ * decoder path. Stops early when a window reaches 0; shuts the connection
+ * down on any processing error. */
+void aws_h1_connection_try_process_read_messages(struct aws_h1_connection *connection) {
+
+    /* Protect against this function being called recursively. */
+    if (connection->thread_data.is_processing_read_messages) {
+        return;
+    }
+    connection->thread_data.is_processing_read_messages = true;
+
+    /* Process queued messages */
+    while (!aws_linked_list_empty(&connection->thread_data.read_buffer.messages)) {
+        if (connection->thread_data.is_reading_stopped) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_CONNECTION,
+                "id=%p: Cannot process message because connection is shutting down.",
+                (void *)&connection->base);
+
+            aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED);
+            goto shutdown;
+        }
+
+        bool stop_processing = false;
+
+        /* When connection has switched protocols, messages are processed very differently.
+         * We need to do this check in the middle of the normal processing loop,
+         * in case the switch happens in the middle of processing a message. */
+        if (connection->thread_data.has_switched_protocols) {
+            if (s_try_process_next_midchannel_read_message(connection, &stop_processing)) {
+                goto shutdown;
+            }
+        } else {
+            if (s_try_process_next_stream_read_message(connection, &stop_processing)) {
+                goto shutdown;
+            }
+        }
+
+        /* Break out of loop if we can't process any more data */
+        if (stop_processing) {
+            break;
+        }
+    }
+
+    /* Increment connection window, if necessary */
+    if (s_update_connection_window(connection)) {
+        goto shutdown;
+    }
+
+    connection->thread_data.is_processing_read_messages = false;
+    return;
+
+shutdown:
+    /* NOTE(review): is_processing_read_messages is deliberately left true on
+     * this path, making any further calls no-ops once shutdown begins — confirm
+     * that is the intended behavior. */
+    s_shutdown_due_to_error(connection, aws_last_error());
+}
+
+/* Try to process the next queued aws_io_message as normal HTTP data for an aws_http_stream.
+ * This MUST NOT be called if the connection has switched protocols and become a midchannel handler.
+ * On success, *out_stop_processing is set true when the current stream's window
+ * is 0 and decoding must pause. Returns AWS_OP_ERR on failure. */
+static int s_try_process_next_stream_read_message(struct aws_h1_connection *connection, bool *out_stop_processing) {
+    AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+    AWS_ASSERT(!connection->thread_data.has_switched_protocols);
+    AWS_ASSERT(!connection->thread_data.is_reading_stopped);
+    AWS_ASSERT(!aws_linked_list_empty(&connection->thread_data.read_buffer.messages));
+
+    *out_stop_processing = false;
+
+    /* Ensure that an incoming stream exists to receive the data */
+    if (!connection->thread_data.incoming_stream) {
+        if (aws_http_connection_is_client(&connection->base)) {
+            /* Client side: data with no request awaiting a response is a protocol violation. */
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_CONNECTION,
+                "id=%p: Cannot process message because no requests are currently awaiting response, closing "
+                "connection.",
+                (void *)&connection->base);
+
+            return aws_raise_error(AWS_ERROR_INVALID_STATE);
+
+        } else {
+            /* Server side.
+             * Invoke on-incoming-request callback. The user MUST create a new stream from this callback.
+             * The new stream becomes the current incoming stream */
+            s_set_incoming_stream_ptr(connection, s_server_invoke_on_incoming_request(connection));
+            if (!connection->thread_data.incoming_stream) {
+                AWS_LOGF_ERROR(
+                    AWS_LS_HTTP_CONNECTION,
+                    "id=%p: Incoming request callback failed to provide a new stream, last error %d (%s). "
+                    "Closing connection.",
+                    (void *)&connection->base,
+                    aws_last_error(),
+                    aws_error_name(aws_last_error()));
+
+                return AWS_OP_ERR;
+            }
+        }
+    }
+
+    struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream;
+
+    /* Stop processing if stream's window reaches 0. */
+    const uint64_t stream_window = incoming_stream->thread_data.stream_window;
+    if (stream_window == 0) {
+        AWS_LOGF_TRACE(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: HTTP-stream's window is 0, cannot process message now.",
+            (void *)&connection->base);
+        *out_stop_processing = true;
+        return AWS_OP_SUCCESS;
+    }
+
+    struct aws_linked_list_node *queued_msg_node = aws_linked_list_front(&connection->thread_data.read_buffer.messages);
+    struct aws_io_message *queued_msg = AWS_CONTAINER_OF(queued_msg_node, struct aws_io_message, queueing_handle);
+
+    /* Note that copy_mark is used to mark the progress of partially decoded messages */
+    struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&queued_msg->message_data);
+    aws_byte_cursor_advance(&message_cursor, queued_msg->copy_mark);
+
+    /* Don't process more data than the stream's window can accept.
+     *
+     * TODO: Let the decoder know about stream-window size so it can stop itself,
+     * instead of limiting the amount of data we feed into the decoder at a time.
+     * This would be more optimal, AND avoid an edge-case where the stream-window goes
+     * to 0 as the body ends, and the connection can't proceed to the trailing headers.
+     */
+    message_cursor.len = (size_t)aws_min_u64(message_cursor.len, stream_window);
+
+    const size_t prev_cursor_len = message_cursor.len;
+
+    /* Set some decoder state, based on current stream */
+    aws_h1_decoder_set_logging_id(connection->thread_data.incoming_stream_decoder, incoming_stream);
+
+    /* A HEAD request's response has headers describing a body but no body bytes. */
+    bool body_headers_ignored = incoming_stream->base.request_method == AWS_HTTP_METHOD_HEAD;
+    aws_h1_decoder_set_body_headers_ignored(connection->thread_data.incoming_stream_decoder, body_headers_ignored);
+
+    /* As decoder runs, it invokes the internal s_decoder_X callbacks, which in turn invoke user callbacks.
+     * The decoder will stop once it hits the end of the request/response OR the end of the message data. */
+    if (aws_h1_decode(connection->thread_data.incoming_stream_decoder, &message_cursor)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Message processing failed, error %d (%s). Closing connection.",
+            (void *)&connection->base,
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+
+        return AWS_OP_ERR;
+    }
+
+    /* Whatever the decoder consumed is no longer pending. */
+    size_t bytes_processed = prev_cursor_len - message_cursor.len;
+    queued_msg->copy_mark += bytes_processed;
+
+    AWS_ASSERT(connection->thread_data.read_buffer.pending_bytes >= bytes_processed);
+    connection->thread_data.read_buffer.pending_bytes -= bytes_processed;
+
+    AWS_LOGF_TRACE(
+        AWS_LS_HTTP_CONNECTION,
+        "id=%p: Decoded %zu bytes of message, %zu bytes remain.",
+        (void *)&connection->base,
+        bytes_processed,
+        queued_msg->message_data.len - queued_msg->copy_mark);
+
+    /* If the last of queued_msg has been processed, it can be deleted now.
+     * Otherwise, it remains in the queue for further processing later. */
+    if (queued_msg->copy_mark == queued_msg->message_data.len) {
+        aws_linked_list_remove(&queued_msg->queueing_handle);
+        aws_mem_release(queued_msg->allocator, queued_msg);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Channel-handler vtable "process_write_message": only legal after a protocol
+ * switch, in which case write messages from upstream are passed through as-is. */
+static int s_handler_process_write_message(
+    struct aws_channel_handler *handler,
+    struct aws_channel_slot *slot,
+    struct aws_io_message *message) {
+
+    struct aws_h1_connection *connection = handler->impl;
+
+    if (connection->thread_data.is_writing_stopped) {
+        aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED);
+        goto error;
+    }
+
+    /* Normal HTTP/1 traffic is written via streams, never via raw write messages. */
+    if (!connection->thread_data.has_switched_protocols) {
+        aws_raise_error(AWS_ERROR_INVALID_STATE);
+        goto error;
+    }
+
+    /* Pass the message right along. */
+    int err = aws_channel_slot_send_message(slot, message, AWS_CHANNEL_DIR_WRITE);
+    if (err) {
+        goto error;
+    }
+
+    return AWS_OP_SUCCESS;
+
+error:
+    AWS_LOGF_ERROR(
+        AWS_LS_HTTP_CONNECTION,
+        "id=%p: Destroying write message without passing it along, error %d (%s)",
+        (void *)&connection->base,
+        aws_last_error(),
+        aws_error_name(aws_last_error()));
+
+    if (message->on_completion) {
+        message->on_completion(connection->base.channel_slot->channel, message, aws_last_error(), message->user_data);
+    }
+    aws_mem_release(message->allocator, message);
+    s_shutdown_due_to_error(connection, aws_last_error());
+    /* Still AWS_OP_SUCCESS: the message was consumed (completed and released)
+     * and shutdown has been initiated above. */
+    return AWS_OP_SUCCESS;
+}
+
+/* Channel-handler vtable "increment_read_window": a downstream handler only
+ * exists after a protocol switch; its window update lets us forward more of
+ * the buffered read messages. */
+static int s_handler_increment_read_window(
+    struct aws_channel_handler *handler,
+    struct aws_channel_slot *slot,
+    size_t size) {
+
+    (void)slot;
+    struct aws_h1_connection *connection = handler->impl;
+
+    if (!connection->thread_data.has_switched_protocols) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: HTTP connection cannot have a downstream handler without first switching protocols",
+            (void *)&connection->base);
+
+        aws_raise_error(AWS_ERROR_INVALID_STATE);
+        goto error;
+    }
+
+    AWS_LOGF_TRACE(
+        AWS_LS_HTTP_CONNECTION,
+        "id=%p: Handler in read direction incremented read window by %zu. Sending queued messages, if any.",
+        (void *)&connection->base,
+        size);
+
+    /* Send along any queued messages, and increment connection's window if necessary */
+    aws_h1_connection_try_process_read_messages(connection);
+    return AWS_OP_SUCCESS;
+
+error:
+    s_shutdown_due_to_error(connection, aws_last_error());
+    return AWS_OP_SUCCESS;
+}
+
+/* Channel-handler vtable "shutdown": stops reading or writing depending on
+ * direction. When the write direction shuts down, all remaining streams are
+ * completed with an error before shutdown is acknowledged. */
+static int s_handler_shutdown(
+    struct aws_channel_handler *handler,
+    struct aws_channel_slot *slot,
+    enum aws_channel_direction dir,
+    int error_code,
+    bool free_scarce_resources_immediately) {
+
+    (void)free_scarce_resources_immediately;
+    struct aws_h1_connection *connection = handler->impl;
+
+    AWS_LOGF_TRACE(
+        AWS_LS_HTTP_CONNECTION,
+        "id=%p: Channel shutting down in %s direction with error code %d (%s).",
+        (void *)&connection->base,
+        (dir == AWS_CHANNEL_DIR_READ) ? "read" : "write",
+        error_code,
+        aws_error_name(error_code));
+
+    if (dir == AWS_CHANNEL_DIR_READ) {
+        /* This call ensures that no further streams will be created or worked on. */
+        s_stop(connection, true /*stop_reading*/, false /*stop_writing*/, false /*schedule_shutdown*/, error_code);
+    } else /* dir == AWS_CHANNEL_DIR_WRITE */ {
+
+        s_stop(connection, false /*stop_reading*/, true /*stop_writing*/, false /*schedule_shutdown*/, error_code);
+
+        /* Mark all pending streams as complete. */
+        int stream_error_code = error_code == AWS_ERROR_SUCCESS ? AWS_ERROR_HTTP_CONNECTION_CLOSED : error_code;
+
+        while (!aws_linked_list_empty(&connection->thread_data.stream_list)) {
+            struct aws_linked_list_node *node = aws_linked_list_front(&connection->thread_data.stream_list);
+            s_stream_complete(AWS_CONTAINER_OF(node, struct aws_h1_stream, node), stream_error_code);
+        }
+
+        /* It's OK to access synced_data.new_client_stream_list without holding the lock because
+         * no more streams can be added after s_stop() has been invoked. */
+        while (!aws_linked_list_empty(&connection->synced_data.new_client_stream_list)) {
+            struct aws_linked_list_node *node = aws_linked_list_front(&connection->synced_data.new_client_stream_list);
+            s_stream_complete(AWS_CONTAINER_OF(node, struct aws_h1_stream, node), stream_error_code);
+        }
+    }
+
+    aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately);
+    return AWS_OP_SUCCESS;
+}
+
+/* Channel-handler vtable: reports the connection's current window as the
+ * initial read window for this slot. */
+static size_t s_handler_initial_window_size(struct aws_channel_handler *handler) {
+    struct aws_h1_connection *connection = handler->impl;
+    return connection->thread_data.connection_window;
+}
+
+/* Channel-handler vtable: HTTP/1 adds no fixed per-message framing overhead. */
+static size_t s_handler_message_overhead(struct aws_channel_handler *handler) {
+    (void)handler;
+    return 0;
+}
+
+/* Channel-handler vtable: clears the per-interval HTTP/1 channel statistics. */
+static void s_reset_statistics(struct aws_channel_handler *handler) {
+    struct aws_h1_connection *connection = handler->impl;
+
+    aws_crt_statistics_http1_channel_reset(&connection->thread_data.stats);
+}
+
+/* Folds the elapsed time since the last timestamp into the pending-stream
+ * duration stats for the active outgoing/incoming streams, then restamps
+ * them with "now". Silently does nothing if the clock can't be read. */
+static void s_pull_up_stats_timestamps(struct aws_h1_connection *connection) {
+    uint64_t now_ns = 0;
+    if (aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns)) {
+        return;
+    }
+
+    if (connection->thread_data.outgoing_stream) {
+        s_add_time_measurement_to_stats(
+            connection->thread_data.outgoing_stream_timestamp_ns,
+            now_ns,
+            &connection->thread_data.stats.pending_outgoing_stream_ms);
+
+        connection->thread_data.outgoing_stream_timestamp_ns = now_ns;
+
+        connection->thread_data.stats.current_outgoing_stream_id =
+            aws_http_stream_get_id(&connection->thread_data.outgoing_stream->base);
+    }
+
+    if (connection->thread_data.incoming_stream) {
+        s_add_time_measurement_to_stats(
+            connection->thread_data.incoming_stream_timestamp_ns,
+            now_ns,
+            &connection->thread_data.stats.pending_incoming_stream_ms);
+
+        connection->thread_data.incoming_stream_timestamp_ns = now_ns;
+
+        connection->thread_data.stats.current_incoming_stream_id =
+            aws_http_stream_get_id(&connection->thread_data.incoming_stream->base);
+    }
+}
+
+/* Channel-handler vtable "gather_statistics": brings timestamps up to date,
+ * then appends a pointer to this connection's stats struct to the list. */
+static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats) {
+    struct aws_h1_connection *connection = handler->impl;
+
+    /* TODO: Need update the way we calculate statistics, to account for user-controlled pauses.
+     * If user is adding chunks 1 by 1, there can naturally be a gap in the upload.
+     * If the user lets the stream-window go to zero, there can naturally be a gap in the download. */
+    s_pull_up_stats_timestamps(connection);
+
+    void *stats_base = &connection->thread_data.stats;
+    aws_array_list_push_back(stats, &stats_base);
+}
+
+/* Returns a pointer to the connection's live statistics struct.
+ * Must be called on the channel thread (asserted). */
+struct aws_crt_statistics_http1_channel *aws_h1_connection_get_statistics(struct aws_http_connection *connection) {
+    AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->channel_slot->channel));
+
+    struct aws_h1_connection *h1_conn = (void *)connection;
+
+    return &h1_conn->thread_data.stats;
+}
+
+/* Returns a by-value snapshot of the connection's flow-control state.
+ * Note the side effect: recent_window_increments is reset on each call. */
+struct aws_h1_window_stats aws_h1_connection_window_stats(struct aws_http_connection *connection_base) {
+    struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base);
+    struct aws_h1_window_stats stats = {
+        .connection_window = connection->thread_data.connection_window,
+        .buffer_capacity = connection->thread_data.read_buffer.capacity,
+        .buffer_pending_bytes = connection->thread_data.read_buffer.pending_bytes,
+        .recent_window_increments = connection->thread_data.recent_window_increments,
+        .has_incoming_stream = connection->thread_data.incoming_stream != NULL,
+        .stream_window = connection->thread_data.incoming_stream
+                             ? connection->thread_data.incoming_stream->thread_data.stream_window
+                             : 0,
+    };
+
+    /* Resets each time it's queried */
+    connection->thread_data.recent_window_increments = 0;
+
+    return stats;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/h1_decoder.c b/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
new file mode 100644
index 00000000000..68e5aa224ae
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
@@ -0,0 +1,761 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/string.h>
+#include <aws/http/private/h1_decoder.h>
+#include <aws/http/private/strutil.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+
+AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_chunked, "chunked");
+AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_compress, "compress");
+AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_x_compress, "x-compress");
+AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_deflate, "deflate");
+AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_gzip, "gzip");
+AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_x_gzip, "x-gzip");
+
+/* Decoder runs a state machine.
+ * Each state consumes data until it sets the next state.
+ * A common state is the "line state", which handles consuming one line ending in CRLF
+ * and feeding the line to a linestate_fn, which should process data and set the next state.
+ */
+typedef int(state_fn)(struct aws_h1_decoder *decoder, struct aws_byte_cursor *input);
+typedef int(linestate_fn)(struct aws_h1_decoder *decoder, struct aws_byte_cursor input);
+
+struct aws_h1_decoder {
+    /* Implementation data. */
+    struct aws_allocator *alloc;
+    /* Buffers partial lines between decode calls until a CRLF is seen. */
+    struct aws_byte_buf scratch_space;
+    /* Current state-machine step; consumes input until it sets the next state. */
+    state_fn *run_state;
+    /* Line processor invoked by s_state_getline once a full line is captured. */
+    linestate_fn *process_line;
+    /* Bitmask of AWS_HTTP_TRANSFER_ENCODING_* flags seen in headers. */
+    int transfer_encoding;
+    /* Progress/size for an unchunked (Content-Length) body. */
+    uint64_t content_processed;
+    uint64_t content_length;
+    /* Progress/size for the chunk currently being decoded. */
+    uint64_t chunk_processed;
+    uint64_t chunk_size;
+    /* True once the zero-length chunk arrives; header lines are then trailers. */
+    bool doing_trailers;
+    bool is_done;
+    /* True when body-describing headers must not imply body bytes (e.g. HEAD). */
+    bool body_headers_ignored;
+    bool body_headers_forbidden;
+    enum aws_http_header_block header_block;
+    /* Opaque id used purely for log correlation. */
+    const void *logging_id;
+
+    /* User callbacks and settings. */
+    struct aws_h1_decoder_vtable vtable;
+    /* True when decoding requests (server side), false for responses. */
+    bool is_decoding_requests;
+    void *user_data;
+};
+
+static int s_linestate_request(struct aws_h1_decoder *decoder, struct aws_byte_cursor input);
+static int s_linestate_response(struct aws_h1_decoder *decoder, struct aws_byte_cursor input);
+static int s_linestate_header(struct aws_h1_decoder *decoder, struct aws_byte_cursor input);
+static int s_linestate_chunk_size(struct aws_h1_decoder *decoder, struct aws_byte_cursor input);
+
+/* Scans input for a CRLF sequence. Returns true if found, with *bytes_processed
+ * set to the byte count up to and including the "\n"; returns false if not
+ * found, with *bytes_processed set to input.len. Handles a CRLF split across
+ * calls by checking the last byte saved in scratch_space. */
+static bool s_scan_for_crlf(struct aws_h1_decoder *decoder, struct aws_byte_cursor input, size_t *bytes_processed) {
+    AWS_ASSERT(input.len > 0);
+
+    /* In a loop, scan for "\n", then look one char back for "\r" */
+    uint8_t *ptr = input.ptr;
+    uint8_t *end = input.ptr + input.len;
+    while (ptr != end) {
+        uint8_t *newline = (uint8_t *)memchr(ptr, '\n', end - ptr);
+        if (!newline) {
+            break;
+        }
+
+        uint8_t prev_char;
+        if (newline == input.ptr) {
+            /* If "\n" is first character check scratch_space for previous character */
+            if (decoder->scratch_space.len > 0) {
+                prev_char = decoder->scratch_space.buffer[decoder->scratch_space.len - 1];
+            } else {
+                prev_char = 0;
+            }
+        } else {
+            prev_char = *(newline - 1);
+        }
+
+        if (prev_char == '\r') {
+            *bytes_processed = 1 + (newline - input.ptr);
+            return true;
+        }
+
+        /* Lone "\n" without "\r"; keep scanning past it. */
+        ptr = newline + 1;
+    }
+
+    *bytes_processed = input.len;
+    return false;
+}
+
+/* This state consumes an entire line, then calls a linestate_fn to process the line.
+ * Partial lines (no CRLF yet) are accumulated in scratch_space across calls. */
+static int s_state_getline(struct aws_h1_decoder *decoder, struct aws_byte_cursor *input) {
+    /* If preceding runs of this state failed to find CRLF, their data is stored in the scratch_space
+     * and new data needs to be combined with the old data for processing. */
+    bool has_prev_data = decoder->scratch_space.len;
+
+    size_t line_length = 0;
+    bool found_crlf = s_scan_for_crlf(decoder, *input, &line_length);
+
+    /* Found end of line! Run the line processor on it */
+    struct aws_byte_cursor line = aws_byte_cursor_advance(input, line_length);
+
+    /* Non-short-circuit `|` on two bools; both operands are already computed. */
+    bool use_scratch = !found_crlf | has_prev_data;
+    if (AWS_UNLIKELY(use_scratch)) {
+        if (aws_byte_buf_append_dynamic(&decoder->scratch_space, &line)) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_STREAM,
+                "id=%p: Internal buffer write failed with error code %d (%s)",
+                decoder->logging_id,
+                aws_last_error(),
+                aws_error_name(aws_last_error()));
+
+            return AWS_OP_ERR;
+        }
+        /* Line is actually the entire scratch buffer now */
+        line = aws_byte_cursor_from_buf(&decoder->scratch_space);
+    }
+
+    if (AWS_LIKELY(found_crlf)) {
+        /* Backup so "\r\n" is not included. */
+        /* RFC-7230 section 3 Message Format */
+        AWS_ASSERT(line.len >= 2);
+        line.len -= 2;
+
+        return decoder->process_line(decoder, line);
+    }
+
+    /* Didn't find crlf, we'll continue scanning when more data comes in */
+    return AWS_OP_SUCCESS;
+}
+
+/* Splits `input` on `split_on` into `num_cursors` pieces.
+ * If error_if_more_splits_possible, any leftover separator is a protocol error;
+ * otherwise the final cursor is extended to cover the remainder of the input.
+ * Raises AWS_ERROR_HTTP_PROTOCOL_ERROR when too few (or too many) pieces exist. */
+static int s_cursor_split_impl(
+    struct aws_byte_cursor input,
+    char split_on,
+    struct aws_byte_cursor *cursor_array,
+    size_t num_cursors,
+    bool error_if_more_splits_possible) {
+
+    struct aws_byte_cursor split;
+    AWS_ZERO_STRUCT(split);
+    for (size_t i = 0; i < num_cursors; ++i) {
+        if (!aws_byte_cursor_next_split(&input, split_on, &split)) {
+            return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+        }
+        cursor_array[i] = split;
+    }
+
+    if (error_if_more_splits_possible) {
+        if (aws_byte_cursor_next_split(&input, split_on, &split)) {
+            return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+        }
+    } else {
+        /* Otherwise, the last cursor will contain the remainder of the string */
+        struct aws_byte_cursor *last_cursor = &cursor_array[num_cursors - 1];
+        last_cursor->len = (input.ptr + input.len) - last_cursor->ptr;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Final cursor contains remainder of input (e.g. a header value that itself
+ * contains the separator character). */
+static int s_cursor_split_first_n_times(
+    struct aws_byte_cursor input,
+    char split_on,
+    struct aws_byte_cursor *cursor_array,
+    size_t num_cursors) {
+
+    return s_cursor_split_impl(input, split_on, cursor_array, num_cursors, false);
+}
+
+/* Error if input could have been split more times (strict field count). */
+static int s_cursor_split_exactly_n_times(
+    struct aws_byte_cursor input,
+    char split_on,
+    struct aws_byte_cursor *cursor_array,
+    size_t num_cursors) {
+
+    return s_cursor_split_impl(input, split_on, cursor_array, num_cursors, true);
+}
+
+/* Transitions the state machine, discarding any partially buffered line. */
+static void s_set_state(struct aws_h1_decoder *decoder, state_fn *state) {
+    decoder->scratch_space.len = 0;
+    decoder->run_state = state;
+    decoder->process_line = NULL;
+}
+
+/* Set next state to capture a full line, then call the specified linestate_fn on it */
+static void s_set_line_state(struct aws_h1_decoder *decoder, linestate_fn *line_processor) {
+    s_set_state(decoder, s_state_getline);
+    decoder->process_line = line_processor;
+}
+
+/* Marks the current message fully decoded and fires the user's on_done
+ * callback; propagates the callback's return value. */
+static int s_mark_done(struct aws_h1_decoder *decoder) {
+    decoder->is_done = true;
+
+    return decoder->vtable.on_done(decoder->user_data);
+}
+
+/* Reset state, in preparation for processing a new message:
+ * rewinds to the start-line state (request-line or status-line depending on
+ * decoder role) and zeroes all per-message bookkeeping. */
+static void s_reset_state(struct aws_h1_decoder *decoder) {
+    if (decoder->is_decoding_requests) {
+        s_set_line_state(decoder, s_linestate_request);
+    } else {
+        s_set_line_state(decoder, s_linestate_response);
+    }
+
+    decoder->transfer_encoding = 0;
+    decoder->content_processed = 0;
+    decoder->content_length = 0;
+    decoder->chunk_processed = 0;
+    decoder->chunk_size = 0;
+    decoder->doing_trailers = false;
+    decoder->is_done = false;
+    decoder->body_headers_ignored = false;
+    decoder->body_headers_forbidden = false;
+    /* set to normal by default */
+    decoder->header_block = AWS_HTTP_HEADER_BLOCK_MAIN;
+}
+
+/* State: consuming a Content-Length delimited body. Feeds as much of the input
+ * as the remaining content-length allows to the on_body callback, then marks
+ * the message done once content_length bytes have been processed. */
+static int s_state_unchunked_body(struct aws_h1_decoder *decoder, struct aws_byte_cursor *input) {
+
+    size_t processed_bytes = 0;
+    AWS_FATAL_ASSERT(decoder->content_processed < decoder->content_length); /* shouldn't be possible */
+
+    if (input->len > (decoder->content_length - decoder->content_processed)) {
+        processed_bytes = (size_t)(decoder->content_length - decoder->content_processed);
+    } else {
+        processed_bytes = input->len;
+    }
+
+    decoder->content_processed += processed_bytes;
+
+    bool finished = decoder->content_processed == decoder->content_length;
+    struct aws_byte_cursor body = aws_byte_cursor_advance(input, processed_bytes);
+    int err = decoder->vtable.on_body(&body, finished, decoder->user_data);
+    if (err) {
+        return AWS_OP_ERR;
+    }
+
+    if (AWS_LIKELY(finished)) {
+        err = s_mark_done(decoder);
+        if (err) {
+            return AWS_OP_ERR;
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Line state: the line after a chunk's data must be empty (i.e. the chunk ends
+ * with a bare CRLF); anything else is a protocol error. Next up: the next
+ * chunk's size line. */
+static int s_linestate_chunk_terminator(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) {
+
+    /* Expecting CRLF at end of each chunk */
+    /* RFC-7230 section 4.1 Chunked Transfer Encoding */
+    if (AWS_UNLIKELY(input.len != 0)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_STREAM, "id=%p: Incoming chunk is invalid, does not end with CRLF.", decoder->logging_id);
+        return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+    }
+
+    s_set_line_state(decoder, s_linestate_chunk_size);
+
+    return AWS_OP_SUCCESS;
+}
+
+/* State: consuming the data portion of one chunk. Note the on_body callback is
+ * always invoked with finished=false here; the final-body signal is sent from
+ * s_linestate_chunk_size when the zero-length terminating chunk arrives. */
+static int s_state_chunk(struct aws_h1_decoder *decoder, struct aws_byte_cursor *input) {
+    size_t processed_bytes = 0;
+    AWS_ASSERT(decoder->chunk_processed < decoder->chunk_size);
+
+    if (input->len > (decoder->chunk_size - decoder->chunk_processed)) {
+        processed_bytes = (size_t)(decoder->chunk_size - decoder->chunk_processed);
+    } else {
+        processed_bytes = input->len;
+    }
+
+    decoder->chunk_processed += processed_bytes;
+
+    bool finished = decoder->chunk_processed == decoder->chunk_size;
+    struct aws_byte_cursor body = aws_byte_cursor_advance(input, processed_bytes);
+    int err = decoder->vtable.on_body(&body, false, decoder->user_data);
+    if (err) {
+        return AWS_OP_ERR;
+    }
+
+    /* Chunk complete; expect its trailing CRLF next. */
+    if (AWS_LIKELY(finished)) {
+        s_set_line_state(decoder, s_linestate_chunk_terminator);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Line state: parses a chunk-size line ("size[;extensions]" in hex).
+ * A size of 0 is the terminating chunk: the body is reported finished and the
+ * decoder moves on to (possibly empty) trailers. Chunk extensions are skipped. */
+static int s_linestate_chunk_size(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) {
+    struct aws_byte_cursor size;
+    AWS_ZERO_STRUCT(size);
+    if (!aws_byte_cursor_next_split(&input, ';', &size)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_STREAM, "id=%p: Incoming chunk is invalid, first line is malformed.", decoder->logging_id);
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: Bad chunk line is: '" PRInSTR "'",
+            decoder->logging_id,
+            AWS_BYTE_CURSOR_PRI(input));
+
+        return AWS_OP_ERR;
+    }
+
+    int err = aws_byte_cursor_utf8_parse_u64_hex(size, &decoder->chunk_size);
+    if (err) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Failed to parse size of incoming chunk.", decoder->logging_id);
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: Bad chunk size is: '" PRInSTR "'",
+            decoder->logging_id,
+            AWS_BYTE_CURSOR_PRI(size));
+
+        return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+    }
+    decoder->chunk_processed = 0;
+
+    /* Empty chunk signifies all chunks have been read. */
+    if (AWS_UNLIKELY(decoder->chunk_size == 0)) {
+        /* Report end-of-body to the user with an empty final cursor. */
+        struct aws_byte_cursor cursor;
+        cursor.ptr = NULL;
+        cursor.len = 0;
+        err = decoder->vtable.on_body(&cursor, true, decoder->user_data);
+        if (err) {
+            return AWS_OP_ERR;
+        }
+
+        /* Expected empty newline and end of message. */
+        decoder->doing_trailers = true;
+        s_set_line_state(decoder, s_linestate_header);
+        return AWS_OP_SUCCESS;
+    }
+
+    /* Skip all chunk extensions, as they are optional. */
+    /* RFC-7230 section 4.1.1 Chunk Extensions */
+
+    s_set_state(decoder, s_state_chunk);
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Line state: parse one header line (or trailer line, when doing_trailers is set).
+ * An empty line ends the header block: transition to chunked/unchunked body
+ * decoding, or mark the message done, per RFC-7230 section 3.3 framing rules.
+ * Content-Length and Transfer-Encoding are tracked for framing; every valid
+ * header is also forwarded to the on_header callback. */
+static int s_linestate_header(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) {
+    int err;
+
+    /* The \r\n was just processed by `s_state_getline`. */
+    /* Empty line signifies end of headers, and beginning of body or end of trailers. */
+    /* RFC-7230 section 3 Message Format */
+    if (input.len == 0) {
+        if (AWS_LIKELY(!decoder->doing_trailers)) {
+            if (decoder->body_headers_ignored) {
+                err = s_mark_done(decoder);
+                if (err) {
+                    return AWS_OP_ERR;
+                }
+            } else if (decoder->transfer_encoding & AWS_HTTP_TRANSFER_ENCODING_CHUNKED) {
+                s_set_line_state(decoder, s_linestate_chunk_size);
+            } else if (decoder->content_length > 0) {
+                s_set_state(decoder, s_state_unchunked_body);
+            } else {
+                err = s_mark_done(decoder);
+                if (err) {
+                    return AWS_OP_ERR;
+                }
+            }
+        } else {
+            /* Empty line means end of message. */
+            err = s_mark_done(decoder);
+            if (err) {
+                return AWS_OP_ERR;
+            }
+        }
+
+        return AWS_OP_SUCCESS;
+    }
+
+    /* Each header field consists of a case-insensitive field name followed by a colon (":"),
+     * optional leading whitespace, the field value, and optional trailing whitespace.
+     * RFC-7230 3.2 */
+    struct aws_byte_cursor splits[2];
+    err = s_cursor_split_first_n_times(input, ':', splits, 2); /* value may contain more colons */
+    if (err) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Invalid incoming header, missing colon.", decoder->logging_id);
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM, "id=%p: Bad header is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input));
+        return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+    }
+
+    struct aws_byte_cursor name = splits[0];
+    if (!aws_strutil_is_http_token(name)) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Invalid incoming header, bad name.", decoder->logging_id);
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM, "id=%p: Bad header is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input));
+        return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+    }
+
+    struct aws_byte_cursor value = aws_strutil_trim_http_whitespace(splits[1]);
+    if (!aws_strutil_is_http_field_value(value)) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Invalid incoming header, bad value.", decoder->logging_id);
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM, "id=%p: Bad header is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input));
+        return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+    }
+
+    struct aws_h1_decoded_header header;
+    header.name = aws_http_str_to_header_name(name);
+    header.name_data = name;
+    header.value_data = value;
+    header.data = input;
+
+    /* Track framing-related headers; everything else just passes through to on_header. */
+    switch (header.name) {
+        case AWS_HTTP_HEADER_CONTENT_LENGTH:
+            if (decoder->transfer_encoding) {
+                AWS_LOGF_ERROR(
+                    AWS_LS_HTTP_STREAM,
+                    "id=%p: Incoming headers for both content-length and transfer-encoding received. This is illegal.",
+                    decoder->logging_id);
+                return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+            }
+
+            if (aws_byte_cursor_utf8_parse_u64(header.value_data, &decoder->content_length)) {
+                AWS_LOGF_ERROR(
+                    AWS_LS_HTTP_STREAM,
+                    "id=%p: Incoming content-length header has invalid value.",
+                    decoder->logging_id);
+                AWS_LOGF_DEBUG(
+                    AWS_LS_HTTP_STREAM,
+                    "id=%p: Bad content-length value is: '" PRInSTR "'",
+                    decoder->logging_id,
+                    AWS_BYTE_CURSOR_PRI(header.value_data));
+                return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+            }
+
+            if (decoder->body_headers_forbidden && decoder->content_length != 0) {
+                AWS_LOGF_ERROR(
+                    AWS_LS_HTTP_STREAM,
+                    "id=%p: Incoming headers for content-length received, but it is illegal for this message to have a "
+                    "body",
+                    decoder->logging_id);
+                return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+            }
+
+            break;
+
+        case AWS_HTTP_HEADER_TRANSFER_ENCODING: {
+            if (decoder->content_length) {
+                AWS_LOGF_ERROR(
+                    AWS_LS_HTTP_STREAM,
+                    "id=%p: Incoming headers for both content-length and transfer-encoding received. This is illegal.",
+                    decoder->logging_id);
+                return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+            }
+
+            if (decoder->body_headers_forbidden) {
+                AWS_LOGF_ERROR(
+                    AWS_LS_HTTP_STREAM,
+                    "id=%p: Incoming headers for transfer-encoding received, but it is illegal for this message to "
+                    "have a body",
+                    decoder->logging_id);
+                return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+            }
+            /* RFC-7230 section 3.3.1 Transfer-Encoding */
+            /* RFC-7230 section 4.2 Compression Codings */
+
+            /* Note that it's possible for multiple Transfer-Encoding headers to exist, in which case the values
+             * should be appended with those from any previously encountered Transfer-Encoding headers. */
+            struct aws_byte_cursor split;
+            AWS_ZERO_STRUCT(split);
+            while (aws_byte_cursor_next_split(&header.value_data, ',', &split)) {
+                struct aws_byte_cursor coding = aws_strutil_trim_http_whitespace(split);
+                int prev_flags = decoder->transfer_encoding;
+
+                if (aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_chunked, &coding)) {
+                    decoder->transfer_encoding |= AWS_HTTP_TRANSFER_ENCODING_CHUNKED;
+
+                } else if (
+                    aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_compress, &coding) ||
+                    aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_x_compress, &coding)) {
+                    /* A recipient SHOULD consider "x-compress" to be equivalent to "compress". RFC-7230 4.2.1 */
+                    decoder->transfer_encoding |= AWS_HTTP_TRANSFER_ENCODING_DEPRECATED_COMPRESS;
+
+                } else if (aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_deflate, &coding)) {
+                    decoder->transfer_encoding |= AWS_HTTP_TRANSFER_ENCODING_DEFLATE;
+
+                } else if (
+                    aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_gzip, &coding) ||
+                    aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_x_gzip, &coding)) {
+                    /* A recipient SHOULD consider "x-gzip" to be equivalent to "gzip". RFC-7230 4.2.3 */
+                    decoder->transfer_encoding |= AWS_HTTP_TRANSFER_ENCODING_GZIP;
+
+                } else if (coding.len > 0) {
+                    AWS_LOGF_ERROR(
+                        AWS_LS_HTTP_STREAM,
+                        "id=%p: Incoming transfer-encoding header lists unrecognized coding.",
+                        decoder->logging_id);
+                    AWS_LOGF_DEBUG(
+                        AWS_LS_HTTP_STREAM,
+                        "id=%p: Unrecognized coding is: '" PRInSTR "'",
+                        decoder->logging_id,
+                        AWS_BYTE_CURSOR_PRI(coding));
+                    return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+                }
+
+                /* If any transfer coding other than chunked is applied to a request payload body, the sender MUST
+                 * apply chunked as the final transfer coding to ensure that the message is properly framed.
+                 * RFC-7230 3.3.1 */
+                if ((prev_flags & AWS_HTTP_TRANSFER_ENCODING_CHUNKED) && (decoder->transfer_encoding != prev_flags)) {
+                    AWS_LOGF_ERROR(
+                        AWS_LS_HTTP_STREAM,
+                        "id=%p: Incoming transfer-encoding header lists a coding after 'chunked', this is illegal.",
+                        decoder->logging_id);
+                    AWS_LOGF_DEBUG(
+                        AWS_LS_HTTP_STREAM,
+                        "id=%p: Misplaced coding is '" PRInSTR "'",
+                        decoder->logging_id,
+                        AWS_BYTE_CURSOR_PRI(coding));
+                    return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+                }
+            }
+
+            /* TODO: deal with body of indeterminate length, marking it as successful when connection is closed:
+             *
+             * A response that has neither chunked transfer coding nor Content-Length is terminated by closure of
+             * the connection and, thus, is considered complete regardless of the number of message body octets
+             * received, provided that the header section was received intact.
+             * RFC-7230 3.4 */
+        } break;
+
+        default:
+            break;
+    }
+
+    err = decoder->vtable.on_header(&header, decoder->user_data);
+    if (err) {
+        return AWS_OP_ERR;
+    }
+
+    /* Stay in this state: each header line loops back here until the blank line. */
+    s_set_line_state(decoder, s_linestate_header);
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Line state: parse the request-line "method SP request-target SP HTTP-version".
+ * Exactly two spaces are required, each component must be non-empty and valid,
+ * and only HTTP/1.1 is accepted. On success, forwards method and target to the
+ * on_request callback and moves on to header parsing. RFC-7230 section 3.1.1. */
+static int s_linestate_request(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) {
+    struct aws_byte_cursor cursors[3];
+    int err = s_cursor_split_exactly_n_times(input, ' ', cursors, 3); /* extra spaces not allowed */
+    if (err) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_STREAM, "id=%p: Incoming request line has wrong number of spaces.", decoder->logging_id);
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: Bad request line is: '" PRInSTR "'",
+            decoder->logging_id,
+            AWS_BYTE_CURSOR_PRI(input));
+        return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+    }
+
+    for (size_t i = 0; i < AWS_ARRAY_SIZE(cursors); ++i) {
+        if (cursors[i].len == 0) {
+            AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming request line has empty values.", decoder->logging_id);
+            AWS_LOGF_DEBUG(
+                AWS_LS_HTTP_STREAM,
+                "id=%p: Bad request line is: '" PRInSTR "'",
+                decoder->logging_id,
+                AWS_BYTE_CURSOR_PRI(input));
+            return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+        }
+    }
+
+    struct aws_byte_cursor method = cursors[0];
+    struct aws_byte_cursor uri = cursors[1];
+    struct aws_byte_cursor version = cursors[2];
+
+    /* RFC-7230 3.1.1: method = token */
+    if (!aws_strutil_is_http_token(method)) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming request has invalid method.", decoder->logging_id);
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: Bad request line is: '" PRInSTR "'",
+            decoder->logging_id,
+            AWS_BYTE_CURSOR_PRI(input));
+        return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+    }
+
+    if (!aws_strutil_is_http_request_target(uri)) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming request has invalid path.", decoder->logging_id);
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: Bad request line is: '" PRInSTR "'",
+            decoder->logging_id,
+            AWS_BYTE_CURSOR_PRI(input));
+        return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+    }
+
+    /* Only HTTP/1.1 requests are supported by this decoder. */
+    struct aws_byte_cursor version_expected = aws_http_version_to_str(AWS_HTTP_VERSION_1_1);
+    if (!aws_byte_cursor_eq(&version, &version_expected)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_STREAM, "id=%p: Incoming request uses unsupported HTTP version.", decoder->logging_id);
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: Unsupported version is: '" PRInSTR "'",
+            decoder->logging_id,
+            AWS_BYTE_CURSOR_PRI(version));
+        return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+    }
+
+    err = decoder->vtable.on_request(aws_http_str_to_method(method), &method, &uri, decoder->user_data);
+    if (err) {
+        return AWS_OP_ERR;
+    }
+
+    s_set_line_state(decoder, s_linestate_header);
+
+    return AWS_OP_SUCCESS;
+}
+
+/* True for informational (1xx) status codes. */
+static bool s_check_info_response_status_code(int code_val) {
+    return code_val >= 100 && code_val < 200;
+}
+
+/* Line state: parse the status-line "HTTP-version SP status-code SP reason-phrase".
+ * Accepts HTTP/1.1 and HTTP/1.0 responses. Status code must be exactly 3 digits.
+ * Records framing implications of the code (204/304/1xx) before invoking the
+ * on_response callback and moving on to header parsing. RFC-7230 section 3.1.2. */
+static int s_linestate_response(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) {
+    struct aws_byte_cursor cursors[3];
+    int err = s_cursor_split_first_n_times(input, ' ', cursors, 3); /* phrase may contain spaces */
+    if (err) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming response status line is invalid.", decoder->logging_id);
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: Bad status line is: '" PRInSTR "'",
+            decoder->logging_id,
+            AWS_BYTE_CURSOR_PRI(input));
+        return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+    }
+
+    struct aws_byte_cursor version = cursors[0];
+    struct aws_byte_cursor code = cursors[1];
+    struct aws_byte_cursor phrase = cursors[2];
+
+    struct aws_byte_cursor version_1_1_expected = aws_http_version_to_str(AWS_HTTP_VERSION_1_1);
+    struct aws_byte_cursor version_1_0_expected = aws_http_version_to_str(AWS_HTTP_VERSION_1_0);
+    if (!aws_byte_cursor_eq(&version, &version_1_1_expected) && !aws_byte_cursor_eq(&version, &version_1_0_expected)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_STREAM, "id=%p: Incoming response uses unsupported HTTP version.", decoder->logging_id);
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: Unsupported version is: '" PRInSTR "'",
+            decoder->logging_id,
+            AWS_BYTE_CURSOR_PRI(version));
+        return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+    }
+
+    /* Validate phrase */
+    if (!aws_strutil_is_http_reason_phrase(phrase)) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming response has invalid reason phrase.", decoder->logging_id);
+        return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+    }
+
+    /* Status-code is a 3-digit integer. RFC7230 section 3.1.2 */
+    uint64_t code_val_u64;
+    err = aws_byte_cursor_utf8_parse_u64(code, &code_val_u64);
+    if (err || code.len != 3 || code_val_u64 > 999) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming response has invalid status code.", decoder->logging_id);
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: Bad status code is: '" PRInSTR "'",
+            decoder->logging_id,
+            AWS_BYTE_CURSOR_PRI(code));
+        return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+    }
+
+    int code_val = (int)code_val_u64;
+
+    /* RFC-7230 section 3.3 Message Body */
+    /* 304 has no body despite Content-Length; 204 and 1xx may not declare a body at all. */
+    decoder->body_headers_ignored |= code_val == AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED;
+    decoder->body_headers_forbidden = code_val == AWS_HTTP_STATUS_CODE_204_NO_CONTENT || code_val / 100 == 1;
+
+    if (s_check_info_response_status_code(code_val)) {
+        decoder->header_block = AWS_HTTP_HEADER_BLOCK_INFORMATIONAL;
+    }
+
+    err = decoder->vtable.on_response(code_val, decoder->user_data);
+    if (err) {
+        return AWS_OP_ERR;
+    }
+
+    s_set_line_state(decoder, s_linestate_header);
+    return AWS_OP_SUCCESS;
+}
+
+/* Allocate and initialize a decoder from `params` (allocator, vtable, user data,
+ * request/response mode, initial scratch size). Returns NULL on allocation
+ * failure. Caller owns the result; release with aws_h1_decoder_destroy(). */
+struct aws_h1_decoder *aws_h1_decoder_new(struct aws_h1_decoder_params *params) {
+    AWS_ASSERT(params);
+
+    struct aws_h1_decoder *decoder = aws_mem_acquire(params->alloc, sizeof(struct aws_h1_decoder));
+    if (!decoder) {
+        return NULL;
+    }
+    AWS_ZERO_STRUCT(*decoder);
+
+    decoder->alloc = params->alloc;
+    decoder->user_data = params->user_data;
+    decoder->vtable = params->vtable;
+    decoder->is_decoding_requests = params->is_decoding_requests;
+
+    /* NOTE(review): return value of aws_byte_buf_init is ignored; presumably
+     * allocation failure aborts in this aws-c-common version -- confirm. */
+    aws_byte_buf_init(&decoder->scratch_space, params->alloc, params->scratch_space_initial_size);
+
+    s_reset_state(decoder);
+
+    return decoder;
+}
+
+/* Free the decoder and its scratch buffer. Safe to call with NULL. */
+void aws_h1_decoder_destroy(struct aws_h1_decoder *decoder) {
+    if (!decoder) {
+        return;
+    }
+    aws_byte_buf_clean_up(&decoder->scratch_space);
+    aws_mem_release(decoder->alloc, decoder);
+}
+
+/* Run `data` through the decoder's state machine until the cursor is empty or
+ * the message completes. On error the cursor is restored to its original
+ * position; when a message completes, the decoder resets for the next one
+ * (remaining bytes stay in `data` for the caller). */
+int aws_h1_decode(struct aws_h1_decoder *decoder, struct aws_byte_cursor *data) {
+    AWS_ASSERT(decoder);
+    AWS_ASSERT(data);
+
+    struct aws_byte_cursor backup = *data;
+
+    while (data->len && !decoder->is_done) {
+        int err = decoder->run_state(decoder, data);
+        if (err) {
+            /* Reset the data param to how we found it */
+            *data = backup;
+            return AWS_OP_ERR;
+        }
+    }
+
+    if (decoder->is_done) {
+        s_reset_state(decoder);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Returns the AWS_HTTP_TRANSFER_ENCODING_* flags accumulated from the message's headers. */
+int aws_h1_decoder_get_encoding_flags(const struct aws_h1_decoder *decoder) {
+    return decoder->transfer_encoding;
+}
+
+/* Returns the parsed Content-Length value (0 if none was received). */
+uint64_t aws_h1_decoder_get_content_length(const struct aws_h1_decoder *decoder) {
+    return decoder->content_length;
+}
+
+/* Returns whether body-framing headers are being ignored for this message. */
+bool aws_h1_decoder_get_body_headers_ignored(const struct aws_h1_decoder *decoder) {
+    return decoder->body_headers_ignored;
+}
+
+/* Returns which header block (main/informational) is currently being decoded. */
+enum aws_http_header_block aws_h1_decoder_get_header_block(const struct aws_h1_decoder *decoder) {
+    return decoder->header_block;
+}
+
+/* Set the opaque pointer used as the "id=%p" prefix in this decoder's log output. */
+void aws_h1_decoder_set_logging_id(struct aws_h1_decoder *decoder, const void *id) {
+    decoder->logging_id = id;
+}
+
+/* Force the decoder to ignore body-framing headers (e.g. for HEAD responses). */
+void aws_h1_decoder_set_body_headers_ignored(struct aws_h1_decoder *decoder, bool body_headers_ignored) {
+    decoder->body_headers_ignored = body_headers_ignored;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/h1_encoder.c b/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
new file mode 100644
index 00000000000..1899d2f4025
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
@@ -0,0 +1,915 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/h1_encoder.h>
+#include <aws/http/private/strutil.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+#include <aws/io/stream.h>
+
+#include <inttypes.h>
+
+/* Log helpers: prefix encoder messages with the stream currently being encoded. */
+#define ENCODER_LOGF(level, encoder, text, ...) \
+    AWS_LOGF_##level(AWS_LS_HTTP_STREAM, "id=%p: " text, (void *)encoder->current_stream, __VA_ARGS__)
+#define ENCODER_LOG(level, encoder, text) ENCODER_LOGF(level, encoder, "%s", text)
+
+/* 16 hex digits for a uint64_t chunk size, plus the NUL terminator. */
+#define MAX_ASCII_HEX_CHUNK_STR_SIZE (sizeof(uint64_t) * 2 + 1)
+#define CRLF_SIZE 2
+
+/**
+ * Scan headers to detect errors and determine anything we'll need to know later (ex: total length).
+ */
+/* Validate all headers of an outgoing `message`, record framing facts on
+ * `encoder_message` (content length, chunked encoding, Connection: close),
+ * and compute the total encoded length of all header lines into
+ * *out_header_lines_len. Fails with an HTTP error code on invalid or
+ * conflicting headers. */
+static int s_scan_outgoing_headers(
+    struct aws_h1_encoder_message *encoder_message,
+    const struct aws_http_message *message,
+    size_t *out_header_lines_len,
+    bool body_headers_ignored,
+    bool body_headers_forbidden) {
+
+    size_t total = 0;
+    bool has_body_stream = aws_http_message_get_body_stream(message); /* non-NULL pointer -> true */
+    bool has_content_length_header = false;
+    bool has_transfer_encoding_header = false;
+
+    const size_t num_headers = aws_http_message_get_header_count(message);
+    for (size_t i = 0; i < num_headers; ++i) {
+        struct aws_http_header header;
+        aws_http_message_get_header(message, &header, i);
+
+        /* Validate header field-name (RFC-7230 3.2): field-name = token */
+        if (!aws_strutil_is_http_token(header.name)) {
+            AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Header name is invalid");
+            return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_NAME);
+        }
+
+        /* Validate header field-value.
+         * The value itself isn't supposed to have whitespace on either side,
+         * but we'll trim it off before validation so we don't start needlessly
+         * failing requests that used to work before we added validation.
+         * This should be OK because field-value can be sent with any amount
+         * of whitespace around it, which the other side will just ignore (RFC-7230 3.2):
+         * header-field = field-name ":" OWS field-value OWS */
+        struct aws_byte_cursor field_value = aws_strutil_trim_http_whitespace(header.value);
+        if (!aws_strutil_is_http_field_value(field_value)) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_STREAM,
+                "id=static: Header '" PRInSTR "' has invalid value",
+                AWS_BYTE_CURSOR_PRI(header.name));
+            return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+        }
+
+        enum aws_http_header_name name_enum = aws_http_str_to_header_name(header.name);
+        switch (name_enum) {
+            case AWS_HTTP_HEADER_CONNECTION: {
+                if (aws_byte_cursor_eq_c_str(&field_value, "close")) {
+                    encoder_message->has_connection_close_header = true;
+                }
+            } break;
+            case AWS_HTTP_HEADER_CONTENT_LENGTH: {
+                has_content_length_header = true;
+                if (aws_byte_cursor_utf8_parse_u64(field_value, &encoder_message->content_length)) {
+                    AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Invalid Content-Length");
+                    return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+                }
+            } break;
+            case AWS_HTTP_HEADER_TRANSFER_ENCODING: {
+                has_transfer_encoding_header = true;
+                if (0 == field_value.len) {
+                    AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Transfer-Encoding must include a valid value");
+                    return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+                }
+                struct aws_byte_cursor substr;
+                AWS_ZERO_STRUCT(substr);
+                /* Walk comma-separated codings; "chunked" must be the last one seen. */
+                while (aws_byte_cursor_next_split(&field_value, ',', &substr)) {
+                    struct aws_byte_cursor trimmed = aws_strutil_trim_http_whitespace(substr);
+                    if (0 == trimmed.len) {
+                        AWS_LOGF_ERROR(
+                            AWS_LS_HTTP_STREAM,
+                            "id=static: Transfer-Encoding header whitespace only "
+                            "comma delimited header value");
+                        return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+                    }
+                    if (encoder_message->has_chunked_encoding_header) {
+                        /* A coding appeared after "chunked" -- reject. */
+                        AWS_LOGF_ERROR(
+                            AWS_LS_HTTP_STREAM, "id=static: Transfer-Encoding header must end with \"chunked\"");
+                        return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+                    }
+                    if (aws_byte_cursor_eq_c_str(&trimmed, "chunked")) {
+                        encoder_message->has_chunked_encoding_header = true;
+                    }
+                }
+            } break;
+            default:
+                break;
+        }
+
+        /* header-line: "{name}: {value}\r\n" */
+        int err = 0;
+        err |= aws_add_size_checked(header.name.len, total, &total);
+        err |= aws_add_size_checked(header.value.len, total, &total);
+        err |= aws_add_size_checked(4, total, &total); /* ": " + "\r\n" */
+        if (err) {
+            return AWS_OP_ERR;
+        }
+    }
+
+    if (!encoder_message->has_chunked_encoding_header && has_transfer_encoding_header) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Transfer-Encoding header must include \"chunked\"");
+        return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+    }
+
+    /* Per RFC 7230: A sender MUST NOT send a Content-Length header field in any message that contains a
+     * Transfer-Encoding header field. */
+    if (encoder_message->has_chunked_encoding_header && has_content_length_header) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_STREAM, "id=static: Both Content-Length and Transfer-Encoding are set. Only one may be used");
+        return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+    }
+
+    if (encoder_message->has_chunked_encoding_header && has_body_stream) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_STREAM,
+            "id=static: Both Transfer-Encoding chunked header and body stream is set. "
+            "chunked data must use the chunk API to write the body stream.");
+        return aws_raise_error(AWS_ERROR_HTTP_INVALID_BODY_STREAM);
+    }
+
+    if (body_headers_forbidden && (encoder_message->content_length > 0 || has_transfer_encoding_header)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_STREAM,
+            "id=static: Transfer-Encoding or Content-Length headers may not be present in such a message");
+        return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_FIELD);
+    }
+
+    if (body_headers_ignored) {
+        /* Don't send body, no matter what the headers are */
+        encoder_message->content_length = 0;
+        encoder_message->has_chunked_encoding_header = false;
+    }
+
+    /* Caller promised a body via Content-Length but provided no stream. */
+    if (encoder_message->content_length > 0 && !has_body_stream) {
+        return aws_raise_error(AWS_ERROR_HTTP_MISSING_BODY_STREAM);
+    }
+
+    *out_header_lines_len = total;
+    return AWS_OP_SUCCESS;
+}
+
+/* Validate trailer headers and compute their encoded size into *out_size
+ * (including the trailing "\r\n"). Rejects header names that are not allowed
+ * to appear in trailers (framing, routing, auth, caching, etc. -- see
+ * RFC-7230 section 4.1.2). */
+static int s_scan_outgoing_trailer(const struct aws_http_headers *headers, size_t *out_size) {
+    const size_t num_headers = aws_http_headers_count(headers);
+    size_t total = 0;
+    for (size_t i = 0; i < num_headers; i++) {
+        struct aws_http_header header;
+        aws_http_headers_get_index(headers, i, &header);
+        /* Validate header field-name (RFC-7230 3.2): field-name = token */
+        if (!aws_strutil_is_http_token(header.name)) {
+            AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Header name is invalid");
+            return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_NAME);
+        }
+
+        /* Validate header field-value.
+         * The value itself isn't supposed to have whitespace on either side,
+         * but we'll trim it off before validation so we don't start needlessly
+         * failing requests that used to work before we added validation.
+         * This should be OK because field-value can be sent with any amount
+         * of whitespace around it, which the other side will just ignore (RFC-7230 3.2):
+         * header-field = field-name ":" OWS field-value OWS */
+        struct aws_byte_cursor field_value = aws_strutil_trim_http_whitespace(header.value);
+        if (!aws_strutil_is_http_field_value(field_value)) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_STREAM,
+                "id=static: Header '" PRInSTR "' has invalid value",
+                AWS_BYTE_CURSOR_PRI(header.name));
+            return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+        }
+
+        /* Headers in this list may not be sent as trailers. */
+        enum aws_http_header_name name_enum = aws_http_str_to_header_name(header.name);
+        if (name_enum == AWS_HTTP_HEADER_TRANSFER_ENCODING || name_enum == AWS_HTTP_HEADER_CONTENT_LENGTH ||
+            name_enum == AWS_HTTP_HEADER_HOST || name_enum == AWS_HTTP_HEADER_EXPECT ||
+            name_enum == AWS_HTTP_HEADER_CACHE_CONTROL || name_enum == AWS_HTTP_HEADER_MAX_FORWARDS ||
+            name_enum == AWS_HTTP_HEADER_PRAGMA || name_enum == AWS_HTTP_HEADER_RANGE ||
+            name_enum == AWS_HTTP_HEADER_TE || name_enum == AWS_HTTP_HEADER_CONTENT_ENCODING ||
+            name_enum == AWS_HTTP_HEADER_CONTENT_TYPE || name_enum == AWS_HTTP_HEADER_CONTENT_RANGE ||
+            name_enum == AWS_HTTP_HEADER_TRAILER || name_enum == AWS_HTTP_HEADER_WWW_AUTHENTICATE ||
+            name_enum == AWS_HTTP_HEADER_AUTHORIZATION || name_enum == AWS_HTTP_HEADER_PROXY_AUTHENTICATE ||
+            name_enum == AWS_HTTP_HEADER_PROXY_AUTHORIZATION || name_enum == AWS_HTTP_HEADER_SET_COOKIE ||
+            name_enum == AWS_HTTP_HEADER_COOKIE || name_enum == AWS_HTTP_HEADER_AGE ||
+            name_enum == AWS_HTTP_HEADER_EXPIRES || name_enum == AWS_HTTP_HEADER_DATE ||
+            name_enum == AWS_HTTP_HEADER_LOCATION || name_enum == AWS_HTTP_HEADER_RETRY_AFTER ||
+            name_enum == AWS_HTTP_HEADER_VARY || name_enum == AWS_HTTP_HEADER_WARNING) {
+            /* NOTE(review): log text says "has invalid value", but the actual problem
+             * is that this header name is not permitted in trailers. */
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_STREAM,
+                "id=static: Trailing Header '" PRInSTR "' has invalid value",
+                AWS_BYTE_CURSOR_PRI(header.name));
+            return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_FIELD);
+        }
+
+        int err = 0;
+        err |= aws_add_size_checked(header.name.len, total, &total);
+        err |= aws_add_size_checked(header.value.len, total, &total);
+        err |= aws_add_size_checked(4, total, &total); /* ": " + "\r\n" */
+        if (err) {
+            return AWS_OP_ERR;
+        }
+    }
+    if (aws_add_size_checked(2, total, &total)) { /* "\r\n" */
+        return AWS_OP_ERR;
+    }
+    *out_size = total;
+    return AWS_OP_SUCCESS;
+}
+
+/* Append "\r\n" to `dst`. Returns false if the buffer lacked space. */
+static bool s_write_crlf(struct aws_byte_buf *dst) {
+    AWS_PRECONDITION(aws_byte_buf_is_valid(dst));
+    struct aws_byte_cursor crlf_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\r\n");
+    return aws_byte_buf_write_from_whole_cursor(dst, crlf_cursor);
+}
+
+/* Write every header as "{name}: {value}\r\n" into `dst`. Callers pre-size the
+ * buffer via the scan functions, so running out of space is asserted, not handled. */
+static void s_write_headers(struct aws_byte_buf *dst, const struct aws_http_headers *headers) {
+
+    const size_t num_headers = aws_http_headers_count(headers);
+
+    bool wrote_all = true;
+    for (size_t i = 0; i < num_headers; ++i) {
+        struct aws_http_header header;
+        aws_http_headers_get_index(headers, i, &header);
+
+        /* header-line: "{name}: {value}\r\n" */
+        wrote_all &= aws_byte_buf_write_from_whole_cursor(dst, header.name);
+        wrote_all &= aws_byte_buf_write_u8(dst, ':');
+        wrote_all &= aws_byte_buf_write_u8(dst, ' ');
+        wrote_all &= aws_byte_buf_write_from_whole_cursor(dst, header.value);
+        wrote_all &= s_write_crlf(dst);
+    }
+    AWS_ASSERT(wrote_all);
+    (void)wrote_all;
+}
+
+/* Initialize an encoder message from an HTTP request: validates method, path,
+ * and headers, then writes the whole head (request-line + header lines + blank
+ * line) into message->outgoing_head_buf. Acquires a reference on the request's
+ * body stream; released by aws_h1_encoder_message_clean_up(), which also runs
+ * on every error path here. */
+int aws_h1_encoder_message_init_from_request(
+    struct aws_h1_encoder_message *message,
+    struct aws_allocator *allocator,
+    const struct aws_http_message *request,
+    struct aws_linked_list *pending_chunk_list) {
+
+    AWS_PRECONDITION(aws_linked_list_is_valid(pending_chunk_list));
+
+    AWS_ZERO_STRUCT(*message);
+
+    message->body = aws_input_stream_acquire(aws_http_message_get_body_stream(request));
+    message->pending_chunk_list = pending_chunk_list;
+
+    struct aws_byte_cursor method;
+    int err = aws_http_message_get_request_method(request, &method);
+    if (err) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Request method not set");
+        aws_raise_error(AWS_ERROR_HTTP_INVALID_METHOD);
+        goto error;
+    }
+    /* RFC-7230 3.1.1: method = token */
+    if (!aws_strutil_is_http_token(method)) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Request method is invalid");
+        aws_raise_error(AWS_ERROR_HTTP_INVALID_METHOD);
+        goto error;
+    }
+
+    struct aws_byte_cursor uri;
+    err = aws_http_message_get_request_path(request, &uri);
+    if (err) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Request path not set");
+        aws_raise_error(AWS_ERROR_HTTP_INVALID_PATH);
+        goto error;
+    }
+    if (!aws_strutil_is_http_request_target(uri)) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Request path is invalid");
+        aws_raise_error(AWS_ERROR_HTTP_INVALID_PATH);
+        goto error;
+    }
+
+    struct aws_byte_cursor version = aws_http_version_to_str(AWS_HTTP_VERSION_1_1);
+
+    /**
+     * Calculate total size needed for outgoing_head_buffer, then write to buffer.
+     */
+
+    size_t header_lines_len;
+    err = s_scan_outgoing_headers(
+        message, request, &header_lines_len, false /*body_headers_ignored*/, false /*body_headers_forbidden*/);
+    if (err) {
+        goto error;
+    }
+
+    /* request-line: "{method} {uri} {version}\r\n" */
+    size_t request_line_len = 4; /* 2 spaces + "\r\n" */
+    /* err accumulates across the checked additions; checked once below. */
+    err |= aws_add_size_checked(method.len, request_line_len, &request_line_len);
+    err |= aws_add_size_checked(uri.len, request_line_len, &request_line_len);
+    err |= aws_add_size_checked(version.len, request_line_len, &request_line_len);
+
+    /* head-end: "\r\n" */
+    size_t head_end_len = 2;
+
+    size_t head_total_len = request_line_len;
+    err |= aws_add_size_checked(header_lines_len, head_total_len, &head_total_len);
+    err |= aws_add_size_checked(head_end_len, head_total_len, &head_total_len);
+    if (err) {
+        goto error;
+    }
+
+    err = aws_byte_buf_init(&message->outgoing_head_buf, allocator, head_total_len);
+    if (err) {
+        goto error;
+    }
+
+    bool wrote_all = true;
+
+    wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, method);
+    wrote_all &= aws_byte_buf_write_u8(&message->outgoing_head_buf, ' ');
+    wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, uri);
+    wrote_all &= aws_byte_buf_write_u8(&message->outgoing_head_buf, ' ');
+    wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, version);
+    wrote_all &= s_write_crlf(&message->outgoing_head_buf);
+
+    s_write_headers(&message->outgoing_head_buf, aws_http_message_get_const_headers(request));
+
+    wrote_all &= s_write_crlf(&message->outgoing_head_buf);
+    (void)wrote_all;
+    AWS_ASSERT(wrote_all);
+
+    return AWS_OP_SUCCESS;
+error:
+    aws_h1_encoder_message_clean_up(message);
+    return AWS_OP_ERR;
+}
+
+/* Initialize an encoder message from an HTTP response: validates the status
+ * code and headers, then writes the whole head (status-line + header lines +
+ * blank line) into message->outgoing_head_buf. Acquires a reference on the
+ * response's body stream; released by aws_h1_encoder_message_clean_up(),
+ * which runs on every error path here. body_headers_ignored forces an empty
+ * body regardless of headers (e.g. responding to a HEAD request). */
+int aws_h1_encoder_message_init_from_response(
+    struct aws_h1_encoder_message *message,
+    struct aws_allocator *allocator,
+    const struct aws_http_message *response,
+    bool body_headers_ignored,
+    struct aws_linked_list *pending_chunk_list) {
+
+    AWS_PRECONDITION(aws_linked_list_is_valid(pending_chunk_list));
+
+    AWS_ZERO_STRUCT(*message);
+
+    message->body = aws_input_stream_acquire(aws_http_message_get_body_stream(response));
+    message->pending_chunk_list = pending_chunk_list;
+
+    struct aws_byte_cursor version = aws_http_version_to_str(AWS_HTTP_VERSION_1_1);
+
+    int status_int;
+    int err = aws_http_message_get_response_status(response, &status_int);
+    if (err) {
+        /* goto error (not an early return) so the acquired body stream is released,
+         * matching aws_h1_encoder_message_init_from_request(). */
+        aws_raise_error(AWS_ERROR_HTTP_INVALID_STATUS_CODE);
+        goto error;
+    }
+
+    /* Status code must fit in 3 digits */
+    AWS_ASSERT(status_int >= 0 && status_int <= 999); /* aws_http_message should have already checked this */
+    char status_code_str[4] = "XXX";
+    snprintf(status_code_str, sizeof(status_code_str), "%03d", status_int);
+    struct aws_byte_cursor status_code = aws_byte_cursor_from_c_str(status_code_str);
+
+    struct aws_byte_cursor status_text = aws_byte_cursor_from_c_str(aws_http_status_text(status_int));
+
+    /**
+     * Calculate total size needed for outgoing_head_buffer, then write to buffer.
+     */
+
+    size_t header_lines_len;
+    /**
+     * no body needed in the response
+     * RFC-7230 section 3.3 Message Body
+     */
+    body_headers_ignored |= status_int == AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED;
+    bool body_headers_forbidden = status_int == AWS_HTTP_STATUS_CODE_204_NO_CONTENT || status_int / 100 == 1;
+    err = s_scan_outgoing_headers(message, response, &header_lines_len, body_headers_ignored, body_headers_forbidden);
+    if (err) {
+        goto error;
+    }
+
+    /* valid status must be three digital code, change it into byte_cursor */
+    /* response-line: "{version} {status} {status_text}\r\n" */
+    size_t response_line_len = 4; /* 2 spaces + "\r\n" */
+    err |= aws_add_size_checked(version.len, response_line_len, &response_line_len);
+    err |= aws_add_size_checked(status_code.len, response_line_len, &response_line_len);
+    err |= aws_add_size_checked(status_text.len, response_line_len, &response_line_len);
+
+    /* head-end: "\r\n" */
+    size_t head_end_len = 2;
+    size_t head_total_len = response_line_len;
+    err |= aws_add_size_checked(header_lines_len, head_total_len, &head_total_len);
+    err |= aws_add_size_checked(head_end_len, head_total_len, &head_total_len);
+    if (err) {
+        goto error;
+    }
+
+    err = aws_byte_buf_init(&message->outgoing_head_buf, allocator, head_total_len);
+    if (err) {
+        goto error; /* was `return AWS_OP_ERR`, which leaked message->body */
+    }
+
+    bool wrote_all = true;
+
+    wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, version);
+    wrote_all &= aws_byte_buf_write_u8(&message->outgoing_head_buf, ' ');
+    wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, status_code);
+    wrote_all &= aws_byte_buf_write_u8(&message->outgoing_head_buf, ' ');
+    wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, status_text);
+    wrote_all &= s_write_crlf(&message->outgoing_head_buf);
+
+    s_write_headers(&message->outgoing_head_buf, aws_http_message_get_const_headers(response));
+
+    wrote_all &= s_write_crlf(&message->outgoing_head_buf);
+    (void)wrote_all;
+    AWS_ASSERT(wrote_all);
+
+    /* Success! */
+    return AWS_OP_SUCCESS;
+
+error:
+    aws_h1_encoder_message_clean_up(message);
+    return AWS_OP_ERR;
+}
+
/* Release everything an encoder-message owns and zero the struct.
 * Safe to call on a zeroed or partially-initialized message (release/destroy
 * helpers tolerate NULL members — see aws_h1_trailer_destroy below;
 * aws_input_stream_release is presumably NULL-safe too — TODO confirm). */
void aws_h1_encoder_message_clean_up(struct aws_h1_encoder_message *message) {
    aws_input_stream_release(message->body);
    aws_byte_buf_clean_up(&message->outgoing_head_buf);
    aws_h1_trailer_destroy(message->trailer);
    AWS_ZERO_STRUCT(*message); /* leave message in a re-initializable state */
}
+
/* Initialize an encoder to its idle (INIT) state; stores the allocator for later use. */
void aws_h1_encoder_init(struct aws_h1_encoder *encoder, struct aws_allocator *allocator) {
    AWS_ZERO_STRUCT(*encoder); /* zeroing sets state to AWS_H1_ENCODER_STATE_INIT (value 0) — assumed; confirm enum layout */
    encoder->allocator = allocator;
}
+
/* Reset the encoder. The encoder owns no allocations itself (messages are
 * owned by their streams), so zeroing is sufficient. */
void aws_h1_encoder_clean_up(struct aws_h1_encoder *encoder) {
    AWS_ZERO_STRUCT(*encoder);
}
+
+int aws_h1_encoder_start_message(
+ struct aws_h1_encoder *encoder,
+ struct aws_h1_encoder_message *message,
+ struct aws_http_stream *stream) {
+
+ AWS_PRECONDITION(encoder);
+ AWS_PRECONDITION(message);
+
+ if (encoder->message) {
+ ENCODER_LOG(ERROR, encoder, "Attempting to start new request while previous request is in progress.");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ encoder->current_stream = stream;
+ encoder->message = message;
+
+ return AWS_OP_SUCCESS;
+}
+
+static bool s_write_chunk_size(struct aws_byte_buf *dst, uint64_t chunk_size) {
+ AWS_PRECONDITION(dst);
+ AWS_PRECONDITION(aws_byte_buf_is_valid(dst));
+ char ascii_hex_chunk_size_str[MAX_ASCII_HEX_CHUNK_STR_SIZE] = {0};
+ snprintf(ascii_hex_chunk_size_str, sizeof(ascii_hex_chunk_size_str), "%" PRIX64, chunk_size);
+ return aws_byte_buf_write_from_whole_cursor(dst, aws_byte_cursor_from_c_str(ascii_hex_chunk_size_str));
+}
+
+static bool s_write_chunk_extension(struct aws_byte_buf *dst, struct aws_http1_chunk_extension *chunk_extension) {
+ AWS_PRECONDITION(chunk_extension);
+ AWS_PRECONDITION(aws_byte_buf_is_valid(dst));
+ bool wrote_all = true;
+ wrote_all &= aws_byte_buf_write_u8(dst, ';');
+ wrote_all &= aws_byte_buf_write_from_whole_cursor(dst, chunk_extension->key);
+ wrote_all &= aws_byte_buf_write_u8(dst, '=');
+ wrote_all &= aws_byte_buf_write_from_whole_cursor(dst, chunk_extension->value);
+ return wrote_all;
+}
+
+static size_t s_calculate_chunk_line_size(const struct aws_http1_chunk_options *options) {
+ size_t chunk_line_size = MAX_ASCII_HEX_CHUNK_STR_SIZE + CRLF_SIZE;
+ for (size_t i = 0; i < options->num_extensions; ++i) {
+ struct aws_http1_chunk_extension *chunk_extension = options->extensions + i;
+ chunk_line_size += sizeof(';');
+ chunk_line_size += chunk_extension->key.len;
+ chunk_line_size += sizeof('=');
+ chunk_line_size += chunk_extension->value.len;
+ }
+ return chunk_line_size;
+}
+
+static void s_populate_chunk_line_buffer(
+ struct aws_byte_buf *chunk_line,
+ const struct aws_http1_chunk_options *options) {
+
+ bool wrote_chunk_line = true;
+ wrote_chunk_line &= s_write_chunk_size(chunk_line, options->chunk_data_size);
+ for (size_t i = 0; i < options->num_extensions; ++i) {
+ wrote_chunk_line &= s_write_chunk_extension(chunk_line, options->extensions + i);
+ }
+ wrote_chunk_line &= s_write_crlf(chunk_line);
+ AWS_ASSERT(wrote_chunk_line);
+ (void)wrote_chunk_line;
+}
+
/**
 * Allocate a trailer object and pre-encode the trailing headers (plus the
 * final CRLF) into its trailer_data buffer.
 * Returns NULL (with aws_last_error set by the scan) if the headers are rejected.
 * NOTE(review): aws_mem_calloc / aws_byte_buf_init results are not checked here;
 * assumes the allocator aborts on OOM (aws-c-common behavior) — confirm.
 */
struct aws_h1_trailer *aws_h1_trailer_new(
    struct aws_allocator *allocator,
    const struct aws_http_headers *trailing_headers) {
    /* Allocate trailer along with storage for the trailer-line */
    size_t trailer_size = 0;
    /* Validates the headers and computes the exact encoded byte count. */
    if (s_scan_outgoing_trailer(trailing_headers, &trailer_size)) {
        return NULL;
    }

    struct aws_h1_trailer *trailer = aws_mem_calloc(allocator, 1, sizeof(struct aws_h1_trailer));
    trailer->allocator = allocator;

    aws_byte_buf_init(&trailer->trailer_data, allocator, trailer_size); /* cannot fail */
    s_write_headers(&trailer->trailer_data, trailing_headers);
    s_write_crlf(&trailer->trailer_data); /* \r\n */
    return trailer;
}
+
+void aws_h1_trailer_destroy(struct aws_h1_trailer *trailer) {
+ if (trailer == NULL) {
+ return;
+ }
+ aws_byte_buf_clean_up(&trailer->trailer_data);
+ aws_mem_release(trailer->allocator, trailer);
+}
+
/**
 * Allocate a chunk plus inline storage for its pre-encoded chunk-line
 * ("chunk-size [chunk-ext] CRLF") in a single allocation.
 * Takes a reference on options->chunk_data; released in aws_h1_chunk_destroy().
 * Returns NULL on allocation failure.
 */
struct aws_h1_chunk *aws_h1_chunk_new(struct aws_allocator *allocator, const struct aws_http1_chunk_options *options) {
    /* Allocate chunk along with storage for the chunk-line */
    struct aws_h1_chunk *chunk;
    size_t chunk_line_size = s_calculate_chunk_line_size(options);
    void *chunk_line_storage;
    if (!aws_mem_acquire_many(
            allocator, 2, &chunk, sizeof(struct aws_h1_chunk), &chunk_line_storage, chunk_line_size)) {
        return NULL;
    }

    chunk->allocator = allocator;
    chunk->data = aws_input_stream_acquire(options->chunk_data);
    chunk->data_size = options->chunk_data_size;
    chunk->on_complete = options->on_complete;
    chunk->user_data = options->user_data;
    /* chunk_line borrows chunk_line_storage from the shared allocation above;
     * it is freed together with the chunk and must NOT be cleaned up separately. */
    chunk->chunk_line = aws_byte_buf_from_empty_array(chunk_line_storage, chunk_line_size);
    s_populate_chunk_line_buffer(&chunk->chunk_line, options);
    return chunk;
}
+
/* Free a chunk and drop its reference on the chunk's data stream.
 * Does NOT fire the completion callback; see aws_h1_chunk_complete_and_destroy(). */
void aws_h1_chunk_destroy(struct aws_h1_chunk *chunk) {
    AWS_PRECONDITION(chunk);
    aws_input_stream_release(chunk->data);
    aws_mem_release(chunk->allocator, chunk); /* chunk_line storage is part of this allocation */
}
+
+void aws_h1_chunk_complete_and_destroy(
+ struct aws_h1_chunk *chunk,
+ struct aws_http_stream *http_stream,
+ int error_code) {
+
+ AWS_PRECONDITION(chunk);
+
+ aws_http1_stream_write_chunk_complete_fn *on_complete = chunk->on_complete;
+ void *user_data = chunk->user_data;
+
+ /* Clean up before firing callback */
+ aws_h1_chunk_destroy(chunk);
+
+ if (NULL != on_complete) {
+ on_complete(http_stream, error_code, user_data);
+ }
+}
+
/* Remove the encoder's current chunk from the pending list, fire its completion
 * callback with error_code, free it, and clear encoder->current_chunk.
 * Precondition asserts the current chunk is the front of the pending list. */
static void s_clean_up_current_chunk(struct aws_h1_encoder *encoder, int error_code) {
    AWS_PRECONDITION(encoder->current_chunk);
    AWS_PRECONDITION(&encoder->current_chunk->node == aws_linked_list_front(encoder->message->pending_chunk_list));

    aws_linked_list_remove(&encoder->current_chunk->node);
    aws_h1_chunk_complete_and_destroy(encoder->current_chunk, encoder->current_stream, error_code);
    encoder->current_chunk = NULL;
}
+
+/* Write as much as possible from src_buf to dst, using encoder->progress_len to track progress.
+ * Returns true if the entire src_buf has been copied */
+static bool s_encode_buf(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst, const struct aws_byte_buf *src) {
+
+ /* advance src_cursor to current position in src_buf */
+ struct aws_byte_cursor src_cursor = aws_byte_cursor_from_buf(src);
+ aws_byte_cursor_advance(&src_cursor, (size_t)encoder->progress_bytes);
+
+ /* write as much as possible to dst, src_cursor is advanced as write occurs */
+ struct aws_byte_cursor written = aws_byte_buf_write_to_capacity(dst, &src_cursor);
+ encoder->progress_bytes += written.len;
+
+ return src_cursor.len == 0;
+}
+
/* Write as much body stream as possible into dst buffer.
 * Increments encoder->progress_bytes to track progress.
 *
 * Returns AWS_OP_SUCCESS both when progress was made and when it temporarily
 * cannot be (dst full, or stream has no data ready); *out_done is set true only
 * once exactly total_length bytes have been read.
 * Returns AWS_OP_ERR if the stream fails, overruns total_length, or ends early. */
static int s_encode_stream(
    struct aws_h1_encoder *encoder,
    struct aws_byte_buf *dst,
    struct aws_input_stream *stream,
    uint64_t total_length,
    bool *out_done) {

    *out_done = false;

    if (dst->capacity == dst->len) {
        /* Return success because we want to try again later */
        return AWS_OP_SUCCESS;
    }

    /* Read from stream */
    ENCODER_LOG(TRACE, encoder, "Reading from body stream.");
    const size_t prev_len = dst->len;
    int err = aws_input_stream_read(stream, dst);
    /* the read appends directly into dst; measure it by the length delta */
    const size_t amount_read = dst->len - prev_len;

    if (err) {
        ENCODER_LOGF(
            ERROR,
            encoder,
            "Failed to read body stream, error %d (%s)",
            aws_last_error(),
            aws_error_name(aws_last_error()));

        return AWS_OP_ERR;
    }

    /* Increment progress_bytes, and make sure we haven't written too much */
    int add_err = aws_add_u64_checked(encoder->progress_bytes, amount_read, &encoder->progress_bytes);
    if (add_err || encoder->progress_bytes > total_length) {
        ENCODER_LOGF(ERROR, encoder, "Body stream has exceeded expected length: %" PRIu64, total_length);
        return aws_raise_error(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT);
    }

    ENCODER_LOGF(
        TRACE,
        encoder,
        "Sending %zu bytes of body, progress: %" PRIu64 "/%" PRIu64,
        amount_read,
        encoder->progress_bytes,
        total_length);

    /* Return if we're done sending stream */
    if (encoder->progress_bytes == total_length) {
        *out_done = true;
        return AWS_OP_SUCCESS;
    }

    /* Return if stream failed to write anything. Maybe the data isn't ready yet. */
    if (amount_read == 0) {
        /* Ensure we're not at end-of-stream too early */
        struct aws_stream_status status;
        err = aws_input_stream_get_status(stream, &status);
        if (err) {
            ENCODER_LOGF(
                TRACE,
                encoder,
                "Failed to query body stream status, error %d (%s)",
                aws_last_error(),
                aws_error_name(aws_last_error()));

            return AWS_OP_ERR;
        }
        if (status.is_end_of_stream) {
            /* stream ended before delivering the declared length: hard error */
            ENCODER_LOGF(
                ERROR,
                encoder,
                "Reached end of body stream but sent less than declared length %" PRIu64 "/%" PRIu64,
                encoder->progress_bytes,
                total_length);
            return aws_raise_error(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT);
        }
    }

    /* Not done streaming data out yet */
    return AWS_OP_SUCCESS;
}
+
/* A state function should:
 * - Raise an error only if unrecoverable error occurs.
 * - `return s_switch_state(...)` to switch states.
 * - `return AWS_OP_SUCCESS` if it can't progress any further (waiting for more
 *   space to write into, waiting for more chunks, etc). */
typedef int encoder_state_fn(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst);

/* Switch state.
 * The only reason this returns a value is so it can be called with `return` to conclude a state function */
static int s_switch_state(struct aws_h1_encoder *encoder, enum aws_h1_encoder_state state) {
    encoder->state = state;
    encoder->progress_bytes = 0; /* each state tracks its own progress from zero */
    return AWS_OP_SUCCESS;
}
+
+/* Initial state. Waits until a new message is set */
+static int s_state_fn_init(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ (void)dst;
+
+ if (!encoder->message) {
+ /* Remain in this state. */
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Start encoding message */
+ ENCODER_LOG(TRACE, encoder, "Starting to send data.");
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_HEAD);
+}
+
+/* Write out first line of request/response, plus all the headers.
+ * These have been pre-encoded in aws_h1_encoder_message->outgoing_head_buf. */
+static int s_state_fn_head(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ bool done = s_encode_buf(encoder, dst, &encoder->message->outgoing_head_buf);
+ if (!done) {
+ /* Remain in this state */
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Don't NEED to free this buffer now, but we don't need it anymore, so why not */
+ aws_byte_buf_clean_up(&encoder->message->outgoing_head_buf);
+
+ /* Pick next state */
+ if (encoder->message->body && encoder->message->content_length) {
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_UNCHUNKED_BODY);
+
+ } else if (encoder->message->has_chunked_encoding_header) {
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_NEXT);
+
+ } else {
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_DONE);
+ }
+}
+
+/* Write out body (not using chunked encoding). */
+static int s_state_fn_unchunked_body(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ bool done;
+ if (s_encode_stream(encoder, dst, encoder->message->body, encoder->message->content_length, &done)) {
+ return AWS_OP_ERR;
+ }
+
+ if (!done) {
+ /* Remain in this state until we're done writing out body */
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Message is done */
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_DONE);
+}
+
+/* Select next chunk to work on.
+ * Encoder is essentially "paused" here if no chunks are available. */
+static int s_state_fn_chunk_next(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ (void)dst;
+
+ if (aws_linked_list_empty(encoder->message->pending_chunk_list)) {
+ /* Remain in this state until more chunks arrive */
+ ENCODER_LOG(TRACE, encoder, "No chunks ready to send, waiting for more...");
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Set next chunk and go to next state */
+ struct aws_linked_list_node *node = aws_linked_list_front(encoder->message->pending_chunk_list);
+ encoder->current_chunk = AWS_CONTAINER_OF(node, struct aws_h1_chunk, node);
+ encoder->chunk_count++;
+ ENCODER_LOGF(
+ TRACE,
+ encoder,
+ "Begin sending chunk %zu with size %" PRIu64,
+ encoder->chunk_count,
+ encoder->current_chunk->data_size);
+
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_LINE);
+}
+
+/* Write out "chunk-size [chunk-ext] CRLF".
+ * This data is pre-encoded in the chunk's chunk_line buffer */
+static int s_state_fn_chunk_line(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ bool done = s_encode_buf(encoder, dst, &encoder->current_chunk->chunk_line);
+ if (!done) {
+ /* Remain in state until done writing line */
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Pick next state */
+ if (encoder->current_chunk->data_size == 0) {
+ /* If data_size is 0, then this was the last chunk, which has no body.
+ * Mark it complete and move on to trailer. */
+ ENCODER_LOG(TRACE, encoder, "Final chunk complete");
+ s_clean_up_current_chunk(encoder, AWS_ERROR_SUCCESS);
+
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_TRAILER);
+ }
+
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_BODY);
+}
+
/* Write out data for current chunk.
 * On stream failure, the failing chunk is completed with that error (its
 * callback may run user code that clears aws_last_error), so the error is
 * re-raised before returning. */
static int s_state_fn_chunk_body(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
    bool done;
    if (s_encode_stream(encoder, dst, encoder->current_chunk->data, encoder->current_chunk->data_size, &done)) {
        int error_code = aws_last_error();

        /* The error was caused by the chunk itself, report that specific error in its completion callback */
        s_clean_up_current_chunk(encoder, error_code);

        /* Re-raise error, in case it got cleared during user callback */
        return aws_raise_error(error_code);
    }
    if (!done) {
        /* Remain in this state until we're done writing out body */
        return AWS_OP_SUCCESS;
    }

    return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_END);
}
+
+/* Write CRLF and mark chunk as complete */
+static int s_state_fn_chunk_end(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ bool done = s_write_crlf(dst);
+ if (!done) {
+ /* Remain in this state until done writing out CRLF */
+ return AWS_OP_SUCCESS;
+ }
+
+ ENCODER_LOG(TRACE, encoder, "Chunk complete");
+ s_clean_up_current_chunk(encoder, AWS_ERROR_SUCCESS);
+
+ /* Pick next chunk to work on */
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_NEXT);
+}
+
+/* Write out trailer after last chunk */
+static int s_state_fn_chunk_trailer(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ bool done;
+ /* if a chunked trailer was set */
+ if (encoder->message->trailer) {
+ done = s_encode_buf(encoder, dst, &encoder->message->trailer->trailer_data);
+ } else {
+ done = s_write_crlf(dst);
+ }
+ if (!done) {
+ /* Remain in this state until we're done writing out trailer */
+ return AWS_OP_SUCCESS;
+ }
+
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_DONE);
+}
+
/* Message is done, loop back to start of state machine */
static int s_state_fn_done(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
    (void)dst;

    ENCODER_LOG(TRACE, encoder, "Done sending data.");
    encoder->message = NULL; /* detach the finished message; encoder does not own it */
    return s_switch_state(encoder, AWS_H1_ENCODER_STATE_INIT);
}
+
/* Pairs a state-handler with a human-readable name (for debugging). */
struct encoder_state_def {
    encoder_state_fn *fn;
    const char *name;
};

/* Jump table indexed by enum aws_h1_encoder_state; drives aws_h1_encoder_process(). */
static struct encoder_state_def s_encoder_states[] = {
    [AWS_H1_ENCODER_STATE_INIT] = {.fn = s_state_fn_init, .name = "INIT"},
    [AWS_H1_ENCODER_STATE_HEAD] = {.fn = s_state_fn_head, .name = "HEAD"},
    [AWS_H1_ENCODER_STATE_UNCHUNKED_BODY] = {.fn = s_state_fn_unchunked_body, .name = "BODY"},
    [AWS_H1_ENCODER_STATE_CHUNK_NEXT] = {.fn = s_state_fn_chunk_next, .name = "CHUNK_NEXT"},
    [AWS_H1_ENCODER_STATE_CHUNK_LINE] = {.fn = s_state_fn_chunk_line, .name = "CHUNK_LINE"},
    [AWS_H1_ENCODER_STATE_CHUNK_BODY] = {.fn = s_state_fn_chunk_body, .name = "CHUNK_BODY"},
    [AWS_H1_ENCODER_STATE_CHUNK_END] = {.fn = s_state_fn_chunk_end, .name = "CHUNK_END"},
    [AWS_H1_ENCODER_STATE_CHUNK_TRAILER] = {.fn = s_state_fn_chunk_trailer, .name = "CHUNK_TRAILER"},
    [AWS_H1_ENCODER_STATE_DONE] = {.fn = s_state_fn_done, .name = "DONE"},
};
+
+int aws_h1_encoder_process(struct aws_h1_encoder *encoder, struct aws_byte_buf *out_buf) {
+ AWS_PRECONDITION(encoder);
+ AWS_PRECONDITION(out_buf);
+
+ if (!encoder->message) {
+ ENCODER_LOG(ERROR, encoder, "No message is currently set for encoding.");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ /* Run state machine until states stop changing. (due to out_buf running
+ * out of space, input_stream stalling, waiting for more chunks, etc) */
+ enum aws_h1_encoder_state prev_state;
+ do {
+ prev_state = encoder->state;
+ if (s_encoder_states[encoder->state].fn(encoder, out_buf)) {
+ return AWS_OP_ERR;
+ }
+ } while (prev_state != encoder->state);
+
+ return AWS_OP_SUCCESS;
+}
+
+bool aws_h1_encoder_is_message_in_progress(const struct aws_h1_encoder *encoder) {
+ return encoder->message;
+}
+
/* True if the encoder is parked in CHUNK_NEXT with nothing to send.
 * Short-circuit keeps encoder->message from being dereferenced unless the
 * state is CHUNK_NEXT, which implies a message is set (see s_state_fn_head). */
bool aws_h1_encoder_is_waiting_for_chunks(const struct aws_h1_encoder *encoder) {
    return encoder->state == AWS_H1_ENCODER_STATE_CHUNK_NEXT &&
           aws_linked_list_empty(encoder->message->pending_chunk_list);
}
diff --git a/contrib/restricted/aws/aws-c-http/source/h1_stream.c b/contrib/restricted/aws/aws-c-http/source/h1_stream.c
new file mode 100644
index 00000000000..a5d2f4782b4
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h1_stream.c
@@ -0,0 +1,535 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/h1_stream.h>
+
+#include <aws/http/private/h1_connection.h>
+#include <aws/http/private/h1_encoder.h>
+
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+#include <aws/io/stream.h>
+
+#include <inttypes.h>
+
/* vtable impl: final destruction once the stream's refcount hits zero.
 * Asserts the stream is no longer active and that all chunks were completed
 * (chunk completion callbacks must have fired before this point). */
static void s_stream_destroy(struct aws_http_stream *stream_base) {
    struct aws_h1_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h1_stream, base);
    AWS_ASSERT(
        stream->synced_data.api_state != AWS_H1_STREAM_API_STATE_ACTIVE &&
        "Stream should be complete (or never-activated) when stream destroyed");
    AWS_ASSERT(
        aws_linked_list_empty(&stream->thread_data.pending_chunk_list) &&
        aws_linked_list_empty(&stream->synced_data.pending_chunk_list) &&
        "Chunks should be marked complete before stream destroyed");

    aws_h1_encoder_message_clean_up(&stream->encoder_message);
    aws_byte_buf_clean_up(&stream->incoming_storage_buf);
    aws_mem_release(stream->base.alloc, stream);
}
+
/* Downcast the stream's owning connection to the HTTP/1-specific struct. */
static struct aws_h1_connection *s_get_h1_connection(const struct aws_h1_stream *stream) {
    return AWS_CONTAINER_OF(stream->base.owning_connection, struct aws_h1_connection, base);
}
+
/* Streams share their connection's lock for all synced_data access. */
static void s_stream_lock_synced_data(struct aws_h1_stream *stream) {
    aws_h1_connection_lock_synced_data(s_get_h1_connection(stream));
}
+
/* Counterpart of s_stream_lock_synced_data(). */
static void s_stream_unlock_synced_data(struct aws_h1_stream *stream) {
    aws_h1_connection_unlock_synced_data(s_get_h1_connection(stream));
}
+
/* Channel task that moves cross-thread stream updates (new chunks, trailer,
 * outgoing response, window updates) from synced_data into thread_data on the
 * event-loop thread, then prompts the connection to act on them.
 * Holds a stream reference (added at schedule time) that is released at the end. */
static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
    (void)task;
    struct aws_h1_stream *stream = arg;
    struct aws_h1_connection *connection = s_get_h1_connection(stream);

    if (status != AWS_TASK_STATUS_RUN_READY) {
        /* channel shutting down; just drop our reference */
        goto done;
    }

    AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Running stream cross-thread work task.", (void *)&stream->base);

    /* BEGIN CRITICAL SECTION */
    s_stream_lock_synced_data(stream);

    stream->synced_data.is_cross_thread_work_task_scheduled = false;

    /* Snapshot everything under the lock; act on it after unlocking. */
    int api_state = stream->synced_data.api_state;

    bool found_chunks = !aws_linked_list_empty(&stream->synced_data.pending_chunk_list);
    aws_linked_list_move_all_back(&stream->thread_data.pending_chunk_list, &stream->synced_data.pending_chunk_list);

    /* take ownership of the pending trailer (if any) */
    stream->encoder_message.trailer = stream->synced_data.pending_trailer;
    stream->synced_data.pending_trailer = NULL;

    bool has_outgoing_response = stream->synced_data.has_outgoing_response;

    uint64_t pending_window_update = stream->synced_data.pending_window_update;
    stream->synced_data.pending_window_update = 0;

    s_stream_unlock_synced_data(stream);
    /* END CRITICAL SECTION */

    /* If we have any new outgoing data, prompt the connection to try and send it. */
    bool new_outgoing_data = found_chunks;

    /* If we JUST learned about having an outgoing response, that's a reason to try sending data */
    if (has_outgoing_response && !stream->thread_data.has_outgoing_response) {
        stream->thread_data.has_outgoing_response = true;
        new_outgoing_data = true;
    }

    if (new_outgoing_data && (api_state == AWS_H1_STREAM_API_STATE_ACTIVE)) {
        aws_h1_connection_try_write_outgoing_stream(connection);
    }

    /* Add to window size using saturated sum to prevent overflow.
     * Saturating is fine because it's a u64, the stream could never receive that much data. */
    stream->thread_data.stream_window =
        aws_add_u64_saturating(stream->thread_data.stream_window, pending_window_update);
    if ((pending_window_update > 0) && (api_state == AWS_H1_STREAM_API_STATE_ACTIVE)) {
        /* Now that stream window is larger, connection might have buffered
         * data to send, or might need to increment its own window */
        aws_h1_connection_try_process_read_messages(connection);
    }

done:
    /* Release reference that kept stream alive until task ran */
    aws_http_stream_release(&stream->base);
}
+
/* vtable impl: record a window increment in synced_data, and schedule the
 * cross_thread_work_task if necessary. Callable from any thread.
 * No-op when increment is 0 or the connection doesn't use manual
 * stream-window management. */
static void s_stream_update_window(struct aws_http_stream *stream_base, size_t increment_size) {
    if (increment_size == 0) {
        return;
    }

    if (!stream_base->owning_connection->stream_manual_window_management) {
        return;
    }

    struct aws_h1_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h1_stream, base);
    bool should_schedule_task = false;

    { /* BEGIN CRITICAL SECTION */
        s_stream_lock_synced_data(stream);

        /* Saturated sum. It's a u64. The stream could never receive that much data. */
        stream->synced_data.pending_window_update =
            aws_add_u64_saturating(stream->synced_data.pending_window_update, increment_size);

        /* Don't alert the connection unless the stream is active */
        if (stream->synced_data.api_state == AWS_H1_STREAM_API_STATE_ACTIVE) {
            if (!stream->synced_data.is_cross_thread_work_task_scheduled) {
                stream->synced_data.is_cross_thread_work_task_scheduled = true;
                should_schedule_task = true;
            }
        }

        s_stream_unlock_synced_data(stream);
    } /* END CRITICAL SECTION */

    if (should_schedule_task) {
        /* Keep stream alive until task completes */
        aws_atomic_fetch_add(&stream->base.refcount, 1);
        AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Scheduling stream cross-thread work task.", (void *)stream_base);
        aws_channel_schedule_task_now(
            stream->base.owning_connection->channel_slot->channel, &stream->cross_thread_work_task);
    }
}
+
+static int s_stream_write_chunk(struct aws_http_stream *stream_base, const struct aws_http1_chunk_options *options) {
+ AWS_PRECONDITION(stream_base);
+ AWS_PRECONDITION(options);
+ struct aws_h1_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h1_stream, base);
+
+ if (options->chunk_data == NULL && options->chunk_data_size > 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM, "id=%p: Chunk data cannot be NULL if data size is non-zero", (void *)stream_base);
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct aws_h1_chunk *chunk = aws_h1_chunk_new(stream_base->alloc, options);
+ if (AWS_UNLIKELY(NULL == chunk)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Failed to initialize streamed chunk, error %d (%s).",
+ (void *)stream_base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+
+ int error_code = 0;
+ bool should_schedule_task = false;
+
+ { /* BEGIN CRITICAL SECTION */
+ s_stream_lock_synced_data(stream);
+
+ /* Can only add chunks while stream is active. */
+ if (stream->synced_data.api_state != AWS_H1_STREAM_API_STATE_ACTIVE) {
+ error_code = (stream->synced_data.api_state == AWS_H1_STREAM_API_STATE_INIT)
+ ? AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED
+ : AWS_ERROR_HTTP_STREAM_HAS_COMPLETED;
+ goto unlock;
+ }
+
+ /* Prevent user trying to submit chunks without having set the required headers.
+ * This check also prevents a server-user submitting chunks before the response has been submitted. */
+ if (!stream->synced_data.using_chunked_encoding) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Cannot write chunks without 'transfer-encoding: chunked' header.",
+ (void *)stream_base);
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto unlock;
+ }
+
+ if (stream->synced_data.has_final_chunk) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM, "id=%p: Cannot write additional chunk after final chunk.", (void *)stream_base);
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto unlock;
+ }
+
+ /* success */
+ if (chunk->data_size == 0) {
+ stream->synced_data.has_final_chunk = true;
+ }
+ aws_linked_list_push_back(&stream->synced_data.pending_chunk_list, &chunk->node);
+ should_schedule_task = !stream->synced_data.is_cross_thread_work_task_scheduled;
+ stream->synced_data.is_cross_thread_work_task_scheduled = true;
+
+ unlock:
+ s_stream_unlock_synced_data(stream);
+ } /* END CRITICAL SECTION */
+
+ if (error_code) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Failed to add chunk, error %d (%s)",
+ (void *)stream_base,
+ error_code,
+ aws_error_name(error_code));
+
+ aws_h1_chunk_destroy(chunk);
+ return aws_raise_error(error_code);
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Adding chunk with size %" PRIu64 " to stream",
+ (void *)stream,
+ options->chunk_data_size);
+
+ if (should_schedule_task) {
+ /* Keep stream alive until task completes */
+ aws_atomic_fetch_add(&stream->base.refcount, 1);
+ AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Scheduling stream cross-thread work task.", (void *)stream_base);
+ aws_channel_schedule_task_now(
+ stream->base.owning_connection->channel_slot->channel, &stream->cross_thread_work_task);
+ } else {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM, "id=%p: Stream cross-thread work task was already scheduled.", (void *)stream_base);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_stream_add_trailer(struct aws_http_stream *stream_base, const struct aws_http_headers *trailing_headers) {
+ AWS_PRECONDITION(stream_base);
+ AWS_PRECONDITION(trailing_headers);
+ struct aws_h1_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h1_stream, base);
+
+ struct aws_h1_trailer *trailer = aws_h1_trailer_new(stream_base->alloc, trailing_headers);
+ if (AWS_UNLIKELY(NULL == trailer)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Failed to initialize streamed trailer, error %d (%s).",
+ (void *)stream_base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+
+ int error_code = 0;
+ bool should_schedule_task = false;
+
+ { /* BEGIN CRITICAL SECTION */
+ s_stream_lock_synced_data(stream);
+ /* Can only add trailers while stream is active. */
+ if (stream->synced_data.api_state != AWS_H1_STREAM_API_STATE_ACTIVE) {
+ error_code = (stream->synced_data.api_state == AWS_H1_STREAM_API_STATE_INIT)
+ ? AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED
+ : AWS_ERROR_HTTP_STREAM_HAS_COMPLETED;
+ goto unlock;
+ }
+
+ if (!stream->synced_data.using_chunked_encoding) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Cannot write trailers without 'transfer-encoding: chunked' header.",
+ (void *)stream_base);
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto unlock;
+ }
+
+ if (stream->synced_data.has_added_trailer) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Cannot write trailers twice.", (void *)stream_base);
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto unlock;
+ }
+
+ if (stream->synced_data.has_final_chunk) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Cannot write trailers after final chunk.", (void *)stream_base);
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto unlock;
+ }
+
+ stream->synced_data.has_added_trailer = true;
+ stream->synced_data.pending_trailer = trailer;
+ should_schedule_task = !stream->synced_data.is_cross_thread_work_task_scheduled;
+ stream->synced_data.is_cross_thread_work_task_scheduled = true;
+
+ unlock:
+ s_stream_unlock_synced_data(stream);
+ } /* END CRITICAL SECTION */
+
+ if (error_code) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Failed to add trailer, error %d (%s)",
+ (void *)stream_base,
+ error_code,
+ aws_error_name(error_code));
+
+ aws_h1_trailer_destroy(trailer);
+ return aws_raise_error(error_code);
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Adding trailer to stream", (void *)stream);
+
+ if (should_schedule_task) {
+ /* Keep stream alive until task completes */
+ aws_atomic_fetch_add(&stream->base.refcount, 1);
+ AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Scheduling stream cross-thread work task.", (void *)stream_base);
+ aws_channel_schedule_task_now(
+ stream->base.owning_connection->channel_slot->channel, &stream->cross_thread_work_task);
+ } else {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM, "id=%p: Stream cross-thread work task was already scheduled.", (void *)stream_base);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
/* HTTP/1 stream vtable. The http2_* entries are NULL because those
 * operations have no meaning on an HTTP/1 stream. */
static const struct aws_http_stream_vtable s_stream_vtable = {
    .destroy = s_stream_destroy,
    .update_window = s_stream_update_window,
    .activate = aws_h1_stream_activate,
    .http1_write_chunk = s_stream_write_chunk,
    .http1_add_trailer = s_stream_add_trailer,
    .http2_reset_stream = NULL,
    .http2_get_received_error_code = NULL,
    .http2_get_sent_error_code = NULL,
};
+
/* Shared constructor for client (request) and server (request-handler)
 * streams: allocates the stream, wires up vtable/callbacks, initializes the
 * chunk lists, the cross-thread work task, and the initial window.
 * Returns NULL on allocation failure. Caller owns the single initial reference. */
static struct aws_h1_stream *s_stream_new_common(
    struct aws_http_connection *connection_base,
    void *user_data,
    aws_http_on_incoming_headers_fn *on_incoming_headers,
    aws_http_on_incoming_header_block_done_fn *on_incoming_header_block_done,
    aws_http_on_incoming_body_fn *on_incoming_body,
    aws_http_on_stream_complete_fn *on_complete,
    aws_http_on_stream_destroy_fn *on_destroy) {

    struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base);

    struct aws_h1_stream *stream = aws_mem_calloc(connection_base->alloc, 1, sizeof(struct aws_h1_stream));
    if (!stream) {
        return NULL;
    }

    stream->base.vtable = &s_stream_vtable;
    stream->base.alloc = connection_base->alloc;
    stream->base.owning_connection = connection_base;
    stream->base.user_data = user_data;
    stream->base.on_incoming_headers = on_incoming_headers;
    stream->base.on_incoming_header_block_done = on_incoming_header_block_done;
    stream->base.on_incoming_body = on_incoming_body;
    stream->base.on_complete = on_complete;
    stream->base.on_destroy = on_destroy;

    aws_channel_task_init(
        &stream->cross_thread_work_task, s_stream_cross_thread_work_task, stream, "http1_stream_cross_thread_work");

    aws_linked_list_init(&stream->thread_data.pending_chunk_list);
    aws_linked_list_init(&stream->synced_data.pending_chunk_list);

    stream->thread_data.stream_window = connection->initial_stream_window_size;

    /* Stream refcount starts at 1 for user and is incremented upon activation for the connection */
    aws_atomic_init_int(&stream->base.refcount, 1);

    return stream;
}
+
/* Create a client-side stream for an outgoing request: applies any proxy
 * transform, validates the request, and pre-encodes it for the h1 encoder.
 * Returns NULL on failure (aws_last_error set by the failing step). */
struct aws_h1_stream *aws_h1_stream_new_request(
    struct aws_http_connection *client_connection,
    const struct aws_http_make_request_options *options) {

    struct aws_h1_stream *stream = s_stream_new_common(
        client_connection,
        options->user_data,
        options->on_response_headers,
        options->on_response_header_block_done,
        options->on_response_body,
        options->on_complete,
        options->on_destroy);
    if (!stream) {
        return NULL;
    }

    /* Transform request if necessary */
    if (client_connection->proxy_request_transform) {
        if (client_connection->proxy_request_transform(options->request, client_connection->user_data)) {
            goto error;
        }
    }

    stream->base.client_data = &stream->base.client_or_server_data.client;
    stream->base.client_data->response_status = AWS_HTTP_STATUS_CODE_UNKNOWN;

    /* Validate request and cache info that the encoder will eventually need */
    if (aws_h1_encoder_message_init_from_request(
            &stream->encoder_message,
            client_connection->alloc,
            options->request,
            &stream->thread_data.pending_chunk_list)) {
        goto error;
    }

    /* RFC-7230 Section 6.3: The "close" connection option is used to signal
     * that a connection will not persist after the current request/response*/
    if (stream->encoder_message.has_connection_close_header) {
        stream->is_final_stream = true;
    }

    /* Cache under the lock-protected flag so write_chunk/add_trailer can check it from any thread. */
    stream->synced_data.using_chunked_encoding = stream->encoder_message.has_chunked_encoding_header;

    return stream;

error:
    /* stream was never activated, so destroying directly is safe */
    s_stream_destroy(&stream->base);
    return NULL;
}
+
+/* Creates a server-side HTTP/1 stream for handling an incoming request.
+ * Unlike client streams, request-handler streams are born ACTIVE (no
+ * activate() call needed) and immediately take the connection's reference,
+ * bumping the refcount to 2 (user + connection). */
+struct aws_h1_stream *aws_h1_stream_new_request_handler(const struct aws_http_request_handler_options *options) {
+    struct aws_h1_stream *stream = s_stream_new_common(
+        options->server_connection,
+        options->user_data,
+        options->on_request_headers,
+        options->on_request_header_block_done,
+        options->on_request_body,
+        options->on_complete,
+        options->on_destroy);
+    if (!stream) {
+        return NULL;
+    }
+
+    /* This code is only executed in server mode and can only be invoked from the event-loop thread so don't worry
+     * with the lock here. */
+    stream->base.id = aws_http_connection_get_next_stream_id(options->server_connection);
+
+    /* Request-handler (server) streams don't need user to call activate() on them.
+     * Since these these streams can only be created on the event-loop thread,
+     * it's not possible for callbacks to fire before the stream pointer is returned.
+     * (Clients must call stream.activate() because they might create a stream on any thread) */
+    stream->synced_data.api_state = AWS_H1_STREAM_API_STATE_ACTIVE;
+
+    stream->base.server_data = &stream->base.client_or_server_data.server;
+    stream->base.server_data->on_request_done = options->on_request_done;
+    /* Connection's reference, normally taken at activation time for client streams. */
+    aws_atomic_fetch_add(&stream->base.refcount, 1);
+
+    return stream;
+}
+
+/* Sends a response on a server-side HTTP/1 stream. May be called from any thread.
+ * Validates the response into a local encoder_message first (no lock held), then,
+ * under the shared connection/stream lock, moves it into the stream and schedules
+ * the cross-thread work task if one isn't already pending.
+ * Returns AWS_OP_SUCCESS, or raises AWS_ERROR_HTTP_STREAM_HAS_COMPLETED /
+ * AWS_ERROR_INVALID_STATE / the encoder's validation error. */
+int aws_h1_stream_send_response(struct aws_h1_stream *stream, struct aws_http_message *response) {
+    struct aws_h1_connection *connection = s_get_h1_connection(stream);
+    int error_code = 0;
+
+    /* Validate the response and cache info that encoder will eventually need.
+     * The encoder_message object will be moved into the stream later while holding the lock */
+    struct aws_h1_encoder_message encoder_message;
+    /* Responses to HEAD requests must not carry a body (body-related headers are ignored). */
+    bool body_headers_ignored = stream->base.request_method == AWS_HTTP_METHOD_HEAD;
+    if (aws_h1_encoder_message_init_from_response(
+            &encoder_message,
+            stream->base.alloc,
+            response,
+            body_headers_ignored,
+            &stream->thread_data.pending_chunk_list)) {
+        error_code = aws_last_error();
+        goto error;
+    }
+
+    bool should_schedule_task = false;
+    { /* BEGIN CRITICAL SECTION */
+        s_stream_lock_synced_data(stream);
+        if (stream->synced_data.api_state == AWS_H1_STREAM_API_STATE_COMPLETE) {
+            error_code = AWS_ERROR_HTTP_STREAM_HAS_COMPLETED;
+        } else if (stream->synced_data.has_outgoing_response) {
+            AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Response already created on the stream", (void *)&stream->base);
+            error_code = AWS_ERROR_INVALID_STATE;
+        } else {
+            stream->synced_data.has_outgoing_response = true;
+            /* Ownership of encoder_message transfers to the stream here; the error
+             * path below (which cleans it up) is only reached when this branch wasn't. */
+            stream->encoder_message = encoder_message;
+            if (encoder_message.has_connection_close_header) {
+                /* This will be the last stream connection will process, new streams will be rejected */
+                stream->is_final_stream = true;
+
+                /* Note: We're touching the connection's synced_data, which is OK
+                 * because an h1_connection and all its h1_streams share a single lock. */
+                connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+            }
+            stream->synced_data.using_chunked_encoding = stream->encoder_message.has_chunked_encoding_header;
+
+            /* Only one cross-thread work task may be in flight at a time. */
+            should_schedule_task = !stream->synced_data.is_cross_thread_work_task_scheduled;
+            stream->synced_data.is_cross_thread_work_task_scheduled = true;
+        }
+        s_stream_unlock_synced_data(stream);
+    } /* END CRITICAL SECTION */
+
+    if (error_code) {
+        goto error;
+    }
+
+    /* Success! */
+    AWS_LOGF_DEBUG(
+        AWS_LS_HTTP_STREAM, "id=%p: Created response on connection=%p: ", (void *)stream, (void *)connection);
+
+    if (should_schedule_task) {
+        /* Keep stream alive until task completes */
+        aws_atomic_fetch_add(&stream->base.refcount, 1);
+        AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Scheduling stream cross-thread work task.", (void *)&stream->base);
+        aws_channel_schedule_task_now(
+            stream->base.owning_connection->channel_slot->channel, &stream->cross_thread_work_task);
+    } else {
+        AWS_LOGF_TRACE(
+            AWS_LS_HTTP_STREAM, "id=%p: Stream cross-thread work task was already scheduled.", (void *)&stream->base);
+    }
+
+    return AWS_OP_SUCCESS;
+
+error:
+    AWS_LOGF_ERROR(
+        AWS_LS_HTTP_STREAM,
+        "id=%p: Sending response on the stream failed, error %d (%s)",
+        (void *)&stream->base,
+        error_code,
+        aws_error_name(error_code));
+
+    aws_h1_encoder_message_clean_up(&encoder_message);
+    return aws_raise_error(error_code);
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/h2_connection.c b/contrib/restricted/aws/aws-c-http/source/h2_connection.c
new file mode 100644
index 00000000000..15ea192f8ab
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h2_connection.c
@@ -0,0 +1,2850 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/h2_connection.h>
+#include <aws/http/private/h2_stream.h>
+
+#include <aws/http/private/h2_decoder.h>
+#include <aws/http/private/h2_stream.h>
+#include <aws/http/private/strutil.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/logging.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+/* Apple toolchains such as xcode and swiftpm define the DEBUG symbol. undef it here so we can actually use the token */
+#undef DEBUG
+
+#define CONNECTION_LOGF(level, connection, text, ...) \
+ AWS_LOGF_##level(AWS_LS_HTTP_CONNECTION, "id=%p: " text, (void *)(connection), __VA_ARGS__)
+#define CONNECTION_LOG(level, connection, text) CONNECTION_LOGF(level, connection, "%s", text)
+
+static int s_handler_process_read_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message);
+
+static int s_handler_process_write_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message);
+
+static int s_handler_increment_read_window(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ size_t size);
+
+static int s_handler_shutdown(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ enum aws_channel_direction dir,
+ int error_code,
+ bool free_scarce_resources_immediately);
+
+static size_t s_handler_initial_window_size(struct aws_channel_handler *handler);
+static size_t s_handler_message_overhead(struct aws_channel_handler *handler);
+static void s_handler_destroy(struct aws_channel_handler *handler);
+static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot);
+static struct aws_http_stream *s_connection_make_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+static void s_connection_close(struct aws_http_connection *connection_base);
+static void s_connection_stop_new_request(struct aws_http_connection *connection_base);
+static bool s_connection_is_open(const struct aws_http_connection *connection_base);
+static bool s_connection_new_requests_allowed(const struct aws_http_connection *connection_base);
+static void s_connection_update_window(struct aws_http_connection *connection_base, uint32_t increment_size);
+static int s_connection_change_settings(
+ struct aws_http_connection *connection_base,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ aws_http2_on_change_settings_complete_fn *on_completed,
+ void *user_data);
+static int s_connection_send_ping(
+ struct aws_http_connection *connection_base,
+ const struct aws_byte_cursor *optional_opaque_data,
+ aws_http2_on_ping_complete_fn *on_completed,
+ void *user_data);
+static void s_connection_send_goaway(
+ struct aws_http_connection *connection_base,
+ uint32_t http2_error,
+ bool allow_more_streams,
+ const struct aws_byte_cursor *optional_debug_data);
+static int s_connection_get_sent_goaway(
+ struct aws_http_connection *connection_base,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id);
+static int s_connection_get_received_goaway(
+ struct aws_http_connection *connection_base,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id);
+static void s_connection_get_local_settings(
+ const struct aws_http_connection *connection_base,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]);
+static void s_connection_get_remote_settings(
+ const struct aws_http_connection *connection_base,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]);
+
+static void s_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static void s_outgoing_frames_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static int s_encode_outgoing_frames_queue(struct aws_h2_connection *connection, struct aws_byte_buf *output);
+static int s_encode_data_from_outgoing_streams(struct aws_h2_connection *connection, struct aws_byte_buf *output);
+static int s_record_closed_stream(
+ struct aws_h2_connection *connection,
+ uint32_t stream_id,
+ enum aws_h2_stream_closed_when closed_when);
+static void s_stream_complete(struct aws_h2_connection *connection, struct aws_h2_stream *stream, int error_code);
+static void s_write_outgoing_frames(struct aws_h2_connection *connection, bool first_try);
+static void s_finish_shutdown(struct aws_h2_connection *connection);
+static void s_send_goaway(
+ struct aws_h2_connection *connection,
+ uint32_t h2_error_code,
+ bool allow_more_streams,
+ const struct aws_byte_cursor *optional_debug_data);
+static struct aws_h2_pending_settings *s_new_pending_settings(
+ struct aws_allocator *allocator,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ aws_http2_on_change_settings_complete_fn *on_completed,
+ void *user_data);
+
+static struct aws_h2err s_decoder_on_headers_begin(uint32_t stream_id, void *userdata);
+static struct aws_h2err s_decoder_on_headers_i(
+ uint32_t stream_id,
+ const struct aws_http_header *header,
+ enum aws_http_header_name name_enum,
+ enum aws_http_header_block block_type,
+ void *userdata);
+static struct aws_h2err s_decoder_on_headers_end(
+ uint32_t stream_id,
+ bool malformed,
+ enum aws_http_header_block block_type,
+ void *userdata);
+static struct aws_h2err s_decoder_on_push_promise(uint32_t stream_id, uint32_t promised_stream_id, void *userdata);
+static struct aws_h2err s_decoder_on_data_begin(
+ uint32_t stream_id,
+ uint32_t payload_len,
+ uint32_t total_padding_bytes,
+ bool end_stream,
+ void *userdata);
+static struct aws_h2err s_decoder_on_data_i(uint32_t stream_id, struct aws_byte_cursor data, void *userdata);
+static struct aws_h2err s_decoder_on_end_stream(uint32_t stream_id, void *userdata);
+static struct aws_h2err s_decoder_on_rst_stream(uint32_t stream_id, uint32_t h2_error_code, void *userdata);
+static struct aws_h2err s_decoder_on_ping_ack(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata);
+static struct aws_h2err s_decoder_on_ping(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata);
+static struct aws_h2err s_decoder_on_settings(
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ void *userdata);
+static struct aws_h2err s_decoder_on_settings_ack(void *userdata);
+static struct aws_h2err s_decoder_on_window_update(uint32_t stream_id, uint32_t window_size_increment, void *userdata);
+struct aws_h2err s_decoder_on_goaway(
+ uint32_t last_stream,
+ uint32_t error_code,
+ struct aws_byte_cursor debug_data,
+ void *userdata);
+static void s_reset_statistics(struct aws_channel_handler *handler);
+static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats);
+
+/* Connection vtable for HTTP/2. Server-only entries (request-handler stream
+ * creation, stream_send_response) are NULL here; requests are made via
+ * s_connection_make_request. */
+static struct aws_http_connection_vtable s_h2_connection_vtable = {
+    .channel_handler_vtable =
+        {
+            .process_read_message = s_handler_process_read_message,
+            .process_write_message = s_handler_process_write_message,
+            .increment_read_window = s_handler_increment_read_window,
+            .shutdown = s_handler_shutdown,
+            .initial_window_size = s_handler_initial_window_size,
+            .message_overhead = s_handler_message_overhead,
+            .destroy = s_handler_destroy,
+            .reset_statistics = s_reset_statistics,
+            .gather_statistics = s_gather_statistics,
+        },
+
+    .on_channel_handler_installed = s_handler_installed,
+    .make_request = s_connection_make_request,
+    .new_server_request_handler_stream = NULL,
+    .stream_send_response = NULL,
+    .close = s_connection_close,
+    .stop_new_requests = s_connection_stop_new_request,
+    .is_open = s_connection_is_open,
+    .new_requests_allowed = s_connection_new_requests_allowed,
+    .update_window = s_connection_update_window,
+    .change_settings = s_connection_change_settings,
+    .send_ping = s_connection_send_ping,
+    .send_goaway = s_connection_send_goaway,
+    .get_sent_goaway = s_connection_get_sent_goaway,
+    .get_received_goaway = s_connection_get_received_goaway,
+    .get_local_settings = s_connection_get_local_settings,
+    .get_remote_settings = s_connection_get_remote_settings,
+};
+
+/* Decoder callbacks: the HTTP/2 frame decoder invokes these as it parses
+ * incoming frames; userdata is the aws_h2_connection. */
+static const struct aws_h2_decoder_vtable s_h2_decoder_vtable = {
+    .on_headers_begin = s_decoder_on_headers_begin,
+    .on_headers_i = s_decoder_on_headers_i,
+    .on_headers_end = s_decoder_on_headers_end,
+    .on_push_promise_begin = s_decoder_on_push_promise,
+    .on_data_begin = s_decoder_on_data_begin,
+    .on_data_i = s_decoder_on_data_i,
+    .on_end_stream = s_decoder_on_end_stream,
+    .on_rst_stream = s_decoder_on_rst_stream,
+    .on_ping_ack = s_decoder_on_ping_ack,
+    .on_ping = s_decoder_on_ping,
+    .on_settings = s_decoder_on_settings,
+    .on_settings_ack = s_decoder_on_settings_ack,
+    .on_window_update = s_decoder_on_window_update,
+    .on_goaway = s_decoder_on_goaway,
+};
+
+/* Acquires the connection's synced_data lock; asserts on failure (debug builds). */
+static void s_lock_synced_data(struct aws_h2_connection *connection) {
+    int err = aws_mutex_lock(&connection->synced_data.lock);
+    AWS_ASSERT(!err && "lock failed");
+    (void)err; /* unused in release builds where AWS_ASSERT compiles out */
+}
+
+/* Releases the connection's synced_data lock; asserts on failure (debug builds). */
+static void s_unlock_synced_data(struct aws_h2_connection *connection) {
+    int err = aws_mutex_unlock(&connection->synced_data.lock);
+    AWS_ASSERT(!err && "unlock failed");
+    (void)err; /* unused in release builds where AWS_ASSERT compiles out */
+}
+
+/* Locks both a stream and its connection. Lock order is stream first, then
+ * connection; s_release_stream_and_connection_lock unlocks in reverse order.
+ * NOTE(review): all call sites must use this same order to avoid deadlock —
+ * confirm against the rest of the file. */
+static void s_acquire_stream_and_connection_lock(struct aws_h2_stream *stream, struct aws_h2_connection *connection) {
+    int err = aws_mutex_lock(&stream->synced_data.lock);
+    err |= aws_mutex_lock(&connection->synced_data.lock);
+    AWS_ASSERT(!err && "lock connection and stream failed");
+    (void)err;
+}
+
+/* Unlocks connection then stream — the reverse of
+ * s_acquire_stream_and_connection_lock's acquisition order. */
+static void s_release_stream_and_connection_lock(struct aws_h2_stream *stream, struct aws_h2_connection *connection) {
+    int err = aws_mutex_unlock(&connection->synced_data.lock);
+    err |= aws_mutex_unlock(&stream->synced_data.lock);
+    AWS_ASSERT(!err && "unlock connection and stream failed");
+    (void)err;
+}
+
+/* Adds the elapsed time between start_ns and end_ns (converted to milliseconds)
+ * into *output_ms.
+ * NOTE(review): when end_ns <= start_ns the accumulator is RESET to 0 rather
+ * than left unchanged — presumably intentional (invalid/unordered timestamps
+ * invalidate the measurement), but confirm against callers. */
+static void s_add_time_measurement_to_stats(uint64_t start_ns, uint64_t end_ns, uint64_t *output_ms) {
+    if (end_ns > start_ns) {
+        *output_ms += aws_timestamp_convert(end_ns - start_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL);
+    } else {
+        *output_ms = 0;
+    }
+}
+
+/**
+ * Internal function for bringing connection to a stop.
+ * Invoked multiple times, including when:
+ * - Channel is shutting down in the read direction.
+ * - Channel is shutting down in the write direction.
+ * - An error occurs that will shutdown the channel.
+ * - User wishes to close the connection (this is the only case where the function may run off-thread).
+ */
+static void s_stop(
+    struct aws_h2_connection *connection,
+    bool stop_reading,
+    bool stop_writing,
+    bool schedule_shutdown,
+    int error_code) {
+
+    AWS_ASSERT(stop_reading || stop_writing || schedule_shutdown); /* You are required to stop at least 1 thing */
+
+    /* stop_reading/stop_writing touch thread_data, so those paths must run on
+     * the channel's event-loop thread (asserted below); schedule_shutdown alone
+     * may run off-thread. */
+    if (stop_reading) {
+        AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+        connection->thread_data.is_reading_stopped = true;
+    }
+
+    if (stop_writing) {
+        AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+        connection->thread_data.is_writing_stopped = true;
+    }
+
+    /* Even if we're not scheduling shutdown just yet (ex: sent final request but waiting to read final response)
+     * we don't consider the connection "open" anymore so user can't create more streams */
+    { /* BEGIN CRITICAL SECTION */
+        s_lock_synced_data(connection);
+        connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+        connection->synced_data.is_open = false;
+        s_unlock_synced_data(connection);
+    } /* END CRITICAL SECTION */
+
+    if (schedule_shutdown) {
+        AWS_LOGF_INFO(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Shutting down connection with error code %d (%s).",
+            (void *)&connection->base,
+            error_code,
+            aws_error_name(error_code));
+
+        aws_channel_shutdown(connection->base.channel_slot->channel, error_code);
+    }
+}
+
+/* Shuts the connection down after a write-side failure. Must be called on the
+ * event-loop thread with a non-zero error code. If shutdown was already
+ * blocked waiting for a GOAWAY to be written, that write can never complete
+ * now, so finish shutdown immediately instead of stopping writes. */
+void aws_h2_connection_shutdown_due_to_write_err(struct aws_h2_connection *connection, int error_code) {
+    AWS_PRECONDITION(error_code);
+    AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+    if (connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written) {
+        /* If shutdown is waiting for writes to complete, but writes are now broken,
+         * then we must finish shutdown now */
+        s_finish_shutdown(connection);
+    } else {
+        s_stop(connection, false /*stop_reading*/, true /*stop_writing*/, true /*schedule_shutdown*/, error_code);
+    }
+}
+
+/* Common new() logic for server & client */
+/* Common new() logic for server & client: allocates and fully initializes an
+ * aws_h2_connection (tasks, lists, lock, stream maps, decoder, encoder,
+ * settings, windows, initial pending SETTINGS). Returns NULL on any failure,
+ * cleaning up via s_handler_destroy (safe on a partially-built connection
+ * because the allocation is zeroed). */
+static struct aws_h2_connection *s_connection_new(
+    struct aws_allocator *alloc,
+    bool manual_window_management,
+    const struct aws_http2_connection_options *http2_options,
+    bool server) {
+
+    AWS_PRECONDITION(http2_options);
+
+    struct aws_h2_connection *connection = aws_mem_calloc(alloc, 1, sizeof(struct aws_h2_connection));
+    if (!connection) {
+        return NULL;
+    }
+    connection->base.vtable = &s_h2_connection_vtable;
+    connection->base.alloc = alloc;
+    connection->base.channel_handler.vtable = &s_h2_connection_vtable.channel_handler_vtable;
+    connection->base.channel_handler.alloc = alloc;
+    connection->base.channel_handler.impl = connection;
+    connection->base.http_version = AWS_HTTP_VERSION_2;
+    /* Init the next stream id (server must use even ids, client odd [RFC 7540 5.1.1])*/
+    connection->base.next_stream_id = (server ? 2 : 1);
+    /* Stream window management */
+    connection->base.stream_manual_window_management = manual_window_management;
+
+    /* Connection window management */
+    connection->conn_manual_window_management = http2_options->conn_manual_window_management;
+    connection->on_goaway_received = http2_options->on_goaway_received;
+    connection->on_remote_settings_change = http2_options->on_remote_settings_change;
+
+    aws_channel_task_init(
+        &connection->cross_thread_work_task, s_cross_thread_work_task, connection, "HTTP/2 cross-thread work");
+
+    aws_channel_task_init(
+        &connection->outgoing_frames_task, s_outgoing_frames_task, connection, "HTTP/2 outgoing frames");
+
+    /* 1 refcount for user */
+    aws_atomic_init_int(&connection->base.refcount, 1);
+    /* max_stream_id + 1 acts as the "no GOAWAY yet" sentinel (greater than any valid stream id). */
+    uint32_t max_stream_id = AWS_H2_STREAM_ID_MAX;
+    connection->synced_data.goaway_sent_last_stream_id = max_stream_id + 1;
+    connection->synced_data.goaway_received_last_stream_id = max_stream_id + 1;
+
+    /* synced_data lists: work queued from any thread, drained by the cross-thread task. */
+    aws_linked_list_init(&connection->synced_data.pending_stream_list);
+    aws_linked_list_init(&connection->synced_data.pending_frame_list);
+    aws_linked_list_init(&connection->synced_data.pending_settings_list);
+    aws_linked_list_init(&connection->synced_data.pending_ping_list);
+    aws_linked_list_init(&connection->synced_data.pending_goaway_list);
+
+    /* thread_data lists: only touched on the event-loop thread. */
+    aws_linked_list_init(&connection->thread_data.outgoing_streams_list);
+    aws_linked_list_init(&connection->thread_data.pending_settings_queue);
+    aws_linked_list_init(&connection->thread_data.pending_ping_queue);
+    aws_linked_list_init(&connection->thread_data.stalled_window_streams_list);
+    aws_linked_list_init(&connection->thread_data.waiting_streams_list);
+    aws_linked_list_init(&connection->thread_data.outgoing_frames_queue);
+
+    if (aws_mutex_init(&connection->synced_data.lock)) {
+        CONNECTION_LOGF(
+            ERROR, connection, "Mutex init error %d (%s).", aws_last_error(), aws_error_name(aws_last_error()));
+        goto error;
+    }
+
+    if (aws_hash_table_init(
+            &connection->thread_data.active_streams_map, alloc, 8, aws_hash_ptr, aws_ptr_eq, NULL, NULL)) {
+
+        CONNECTION_LOGF(
+            ERROR, connection, "Hashtable init error %d (%s).", aws_last_error(), aws_error_name(aws_last_error()));
+        goto error;
+    }
+    size_t max_closed_streams = AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS;
+    if (http2_options->max_closed_streams) {
+        max_closed_streams = http2_options->max_closed_streams;
+    }
+
+    /* Bounded FIFO cache of recently-closed stream ids, used to distinguish
+     * frames for closed streams from protocol errors. */
+    connection->thread_data.closed_streams =
+        aws_cache_new_fifo(alloc, aws_hash_ptr, aws_ptr_eq, NULL, NULL, max_closed_streams);
+    if (!connection->thread_data.closed_streams) {
+        CONNECTION_LOGF(
+            ERROR, connection, "FIFO cache init error %d (%s).", aws_last_error(), aws_error_name(aws_last_error()));
+        goto error;
+    }
+
+    /* Initialize the value of settings */
+    memcpy(connection->thread_data.settings_peer, aws_h2_settings_initial, sizeof(aws_h2_settings_initial));
+    memcpy(connection->thread_data.settings_self, aws_h2_settings_initial, sizeof(aws_h2_settings_initial));
+
+    memcpy(connection->synced_data.settings_peer, aws_h2_settings_initial, sizeof(aws_h2_settings_initial));
+    memcpy(connection->synced_data.settings_self, aws_h2_settings_initial, sizeof(aws_h2_settings_initial));
+
+    connection->thread_data.window_size_peer = AWS_H2_INIT_WINDOW_SIZE;
+    connection->thread_data.window_size_self = AWS_H2_INIT_WINDOW_SIZE;
+
+    connection->thread_data.goaway_received_last_stream_id = AWS_H2_STREAM_ID_MAX;
+    connection->thread_data.goaway_sent_last_stream_id = AWS_H2_STREAM_ID_MAX;
+
+    aws_crt_statistics_http2_channel_init(&connection->thread_data.stats);
+    connection->thread_data.stats.was_inactive = true; /* Start with non active streams */
+
+    connection->synced_data.is_open = true;
+    connection->synced_data.new_stream_error_code = AWS_ERROR_SUCCESS;
+
+    /* Create a new decoder */
+    struct aws_h2_decoder_params params = {
+        .alloc = alloc,
+        .vtable = &s_h2_decoder_vtable,
+        .userdata = connection,
+        .logging_id = connection,
+        .is_server = server,
+    };
+    connection->thread_data.decoder = aws_h2_decoder_new(&params);
+    if (!connection->thread_data.decoder) {
+        CONNECTION_LOGF(
+            ERROR, connection, "Decoder init error %d (%s)", aws_last_error(), aws_error_name(aws_last_error()));
+        goto error;
+    }
+
+    if (aws_h2_frame_encoder_init(&connection->thread_data.encoder, alloc, &connection->base)) {
+        CONNECTION_LOGF(
+            ERROR, connection, "Encoder init error %d (%s)", aws_last_error(), aws_error_name(aws_last_error()));
+        goto error;
+    }
+    /* User data from connection base is not ready until the handler installed */
+    connection->thread_data.init_pending_settings = s_new_pending_settings(
+        connection->base.alloc,
+        http2_options->initial_settings_array,
+        http2_options->num_initial_settings,
+        http2_options->on_initial_settings_completed,
+        NULL /* user_data is set later... */);
+    if (!connection->thread_data.init_pending_settings) {
+        goto error;
+    }
+    /* We enqueue the inital settings when handler get installed */
+    return connection;
+
+error:
+    s_handler_destroy(&connection->base.channel_handler);
+
+    return NULL;
+}
+
+/* Public constructor for a server-side HTTP/2 connection.
+ * Returns the base connection pointer, or NULL on failure. */
+struct aws_http_connection *aws_http_connection_new_http2_server(
+    struct aws_allocator *allocator,
+    bool manual_window_management,
+    const struct aws_http2_connection_options *http2_options) {
+
+    struct aws_h2_connection *connection = s_connection_new(allocator, manual_window_management, http2_options, true);
+    if (!connection) {
+        return NULL;
+    }
+
+    connection->base.server_data = &connection->base.client_or_server_data.server;
+
+    return &connection->base;
+}
+
+/* Public constructor for a client-side HTTP/2 connection.
+ * Returns the base connection pointer, or NULL on failure. */
+struct aws_http_connection *aws_http_connection_new_http2_client(
+    struct aws_allocator *allocator,
+    bool manual_window_management,
+    const struct aws_http2_connection_options *http2_options) {
+
+    struct aws_h2_connection *connection = s_connection_new(allocator, manual_window_management, http2_options, false);
+    if (!connection) {
+        return NULL;
+    }
+
+    connection->base.client_data = &connection->base.client_or_server_data.client;
+
+    return &connection->base;
+}
+
+/* Final teardown of the connection. Also used as the error-path cleanup in
+ * s_connection_new, so it must tolerate partially-initialized state (hence
+ * the aws_hash_table_is_valid guard and init_pending_settings NULL check). */
+static void s_handler_destroy(struct aws_channel_handler *handler) {
+    struct aws_h2_connection *connection = handler->impl;
+    CONNECTION_LOG(TRACE, connection, "Destroying connection");
+
+    /* No streams should be left in internal datastructures */
+    AWS_ASSERT(
+        !aws_hash_table_is_valid(&connection->thread_data.active_streams_map) ||
+        aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) == 0);
+
+    AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.waiting_streams_list));
+    AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.stalled_window_streams_list));
+    AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.outgoing_streams_list));
+    AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_stream_list));
+    AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_frame_list));
+    AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_settings_list));
+    AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_ping_list));
+    AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_goaway_list));
+    AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.pending_ping_queue));
+    AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.pending_settings_queue));
+
+    /* Clean up any unsent frames and structures */
+    struct aws_linked_list *outgoing_frames_queue = &connection->thread_data.outgoing_frames_queue;
+    while (!aws_linked_list_empty(outgoing_frames_queue)) {
+        struct aws_linked_list_node *node = aws_linked_list_pop_front(outgoing_frames_queue);
+        struct aws_h2_frame *frame = AWS_CONTAINER_OF(node, struct aws_h2_frame, node);
+        aws_h2_frame_destroy(frame);
+    }
+    if (connection->thread_data.init_pending_settings) {
+        /* if initial settings were never sent, we need to clear the memory here */
+        aws_mem_release(connection->base.alloc, connection->thread_data.init_pending_settings);
+    }
+    aws_h2_decoder_destroy(connection->thread_data.decoder);
+    aws_h2_frame_encoder_clean_up(&connection->thread_data.encoder);
+    aws_hash_table_clean_up(&connection->thread_data.active_streams_map);
+    aws_cache_destroy(connection->thread_data.closed_streams);
+    aws_mutex_clean_up(&connection->synced_data.lock);
+    aws_mem_release(connection->base.alloc, connection);
+}
+
+/* Allocates a pending-SETTINGS record plus a private copy of the settings
+ * array in a single allocation (aws_mem_acquire_many), so one aws_mem_release
+ * of the struct frees everything. Returns NULL on allocation failure. */
+static struct aws_h2_pending_settings *s_new_pending_settings(
+    struct aws_allocator *allocator,
+    const struct aws_http2_setting *settings_array,
+    size_t num_settings,
+    aws_http2_on_change_settings_complete_fn *on_completed,
+    void *user_data) {
+
+    size_t settings_storage_size = sizeof(struct aws_http2_setting) * num_settings;
+    struct aws_h2_pending_settings *pending_settings;
+    void *settings_storage;
+    if (!aws_mem_acquire_many(
+            allocator,
+            2,
+            &pending_settings,
+            sizeof(struct aws_h2_pending_settings),
+            &settings_storage,
+            settings_storage_size)) {
+        return NULL;
+    }
+
+    AWS_ZERO_STRUCT(*pending_settings);
+    /* We buffer the settings up, incase the caller has freed them when the ACK arrives */
+    pending_settings->settings_array = settings_storage;
+    if (settings_array) {
+        memcpy(pending_settings->settings_array, settings_array, num_settings * sizeof(struct aws_http2_setting));
+    }
+    pending_settings->num_settings = num_settings;
+    pending_settings->on_completed = on_completed;
+    pending_settings->user_data = user_data;
+
+    return pending_settings;
+}
+
+/* Allocates a pending-PING record, copying the optional opaque payload and
+ * recording the start time for round-trip measurement.
+ * NOTE(review): the memcpy copies a fixed AWS_HTTP2_PING_DATA_SIZE bytes —
+ * assumes the caller has already validated that optional_opaque_data->len
+ * equals AWS_HTTP2_PING_DATA_SIZE; confirm at the call site. */
+static struct aws_h2_pending_ping *s_new_pending_ping(
+    struct aws_allocator *allocator,
+    const struct aws_byte_cursor *optional_opaque_data,
+    const uint64_t started_time,
+    void *user_data,
+    aws_http2_on_ping_complete_fn *on_completed) {
+
+    struct aws_h2_pending_ping *pending_ping = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_pending_ping));
+    if (!pending_ping) {
+        return NULL;
+    }
+    if (optional_opaque_data) {
+        memcpy(pending_ping->opaque_data, optional_opaque_data->ptr, AWS_HTTP2_PING_DATA_SIZE);
+    }
+    pending_ping->started_time = started_time;
+    pending_ping->on_completed = on_completed;
+    pending_ping->user_data = user_data;
+    return pending_ping;
+}
+
+/* Allocates a pending-GOAWAY record plus a private copy of the optional debug
+ * data in a single allocation, so the caller's cursor may be freed immediately.
+ * The allocator aborts instead of returning NULL ("mem acquire cannot fail"),
+ * so no failure path is needed here. */
+static struct aws_h2_pending_goaway *s_new_pending_goaway(
+    struct aws_allocator *allocator,
+    uint32_t http2_error,
+    bool allow_more_streams,
+    const struct aws_byte_cursor *optional_debug_data) {
+
+    struct aws_byte_cursor debug_data;
+    AWS_ZERO_STRUCT(debug_data);
+    if (optional_debug_data) {
+        debug_data = *optional_debug_data;
+    }
+    struct aws_h2_pending_goaway *pending_goaway;
+    void *debug_data_storage;
+    /* mem acquire cannot fail anymore */
+    aws_mem_acquire_many(
+        allocator, 2, &pending_goaway, sizeof(struct aws_h2_pending_goaway), &debug_data_storage, debug_data.len);
+    if (debug_data.len) {
+        /* Repoint the cursor at our own copy of the debug bytes. */
+        memcpy(debug_data_storage, debug_data.ptr, debug_data.len);
+        debug_data.ptr = debug_data_storage;
+    }
+    pending_goaway->debug_data = debug_data;
+    pending_goaway->http2_error = http2_error;
+    pending_goaway->allow_more_streams = allow_more_streams;
+    return pending_goaway;
+}
+
+/* Enqueues a control frame (never DATA) for writing. Must run on the
+ * event-loop thread. High-priority frames are inserted ahead of all
+ * normal-priority frames but behind other high-priority ones, and never
+ * ahead of the frame currently being encoded. */
+void aws_h2_connection_enqueue_outgoing_frame(struct aws_h2_connection *connection, struct aws_h2_frame *frame) {
+    AWS_PRECONDITION(frame->type != AWS_H2_FRAME_T_DATA);
+    AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+    if (frame->high_priority) {
+        /* Check from the head of the queue, and find a node with normal priority, and insert before it */
+        struct aws_linked_list_node *iter = aws_linked_list_begin(&connection->thread_data.outgoing_frames_queue);
+        /* one past the last element */
+        const struct aws_linked_list_node *end = aws_linked_list_end(&connection->thread_data.outgoing_frames_queue);
+        while (iter != end) {
+            struct aws_h2_frame *frame_i = AWS_CONTAINER_OF(iter, struct aws_h2_frame, node);
+            /* Never insert in front of the frame that is partially written out already. */
+            if (connection->thread_data.current_outgoing_frame == frame_i) {
+                iter = iter->next;
+                continue;
+            }
+            if (!frame_i->high_priority) {
+                break;
+            }
+            iter = iter->next;
+        }
+        aws_linked_list_insert_before(iter, &frame->node);
+    } else {
+        aws_linked_list_push_back(&connection->thread_data.outgoing_frames_queue, &frame->node);
+    }
+}
+
+/* Completion callback for an aws_io_message we wrote to the channel.
+ * On error, shuts the connection down; on success, re-schedules the
+ * outgoing-frames task so the next message can be produced. */
+static void s_on_channel_write_complete(
+    struct aws_channel *channel,
+    struct aws_io_message *message,
+    int err_code,
+    void *user_data) {
+
+    (void)message;
+    struct aws_h2_connection *connection = user_data;
+
+    if (err_code) {
+        CONNECTION_LOGF(ERROR, connection, "Message did not write to network, error %s", aws_error_name(err_code));
+        aws_h2_connection_shutdown_due_to_write_err(connection, err_code);
+        return;
+    }
+
+    CONNECTION_LOG(TRACE, connection, "Message finished writing to network. Rescheduling outgoing frame task");
+
+    /* To avoid wasting memory, we only want ONE of our written aws_io_messages in the channel at a time.
+     * Therefore, we wait until it's written to the network before trying to send another
+     * by running the outgoing-frame-task again.
+     *
+     * We also want to share the network with other channels.
+     * Therefore, when the write completes, we SCHEDULE the outgoing-frame-task
+     * to run again instead of calling the function directly.
+     * This way, if the message completes synchronously,
+     * we're not hogging the network by writing message after message in a tight loop */
+    aws_channel_schedule_task_now(channel, &connection->outgoing_frames_task);
+}
+
+static void s_outgoing_frames_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+    (void)task;
+
+    /* Only act on a ready task; anything else (e.g. a canceled task) is ignored. */
+    if (status == AWS_TASK_STATUS_RUN_READY) {
+        struct aws_h2_connection *connection = arg;
+        s_write_outgoing_frames(connection, false /*first_try*/);
+    }
+}
+
+/* Core send loop: encode queued frames into a single aws_io_message and hand it to the channel.
+ * Entered either directly from aws_h2_try_write_outgoing_frames() (first_try=true) or from
+ * s_outgoing_frames_task (first_try=false). Caller must already have set
+ * thread_data.is_outgoing_frames_task_active. */
+static void s_write_outgoing_frames(struct aws_h2_connection *connection, bool first_try) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ AWS_PRECONDITION(connection->thread_data.is_outgoing_frames_task_active);
+
+ struct aws_channel_slot *channel_slot = connection->base.channel_slot;
+ struct aws_linked_list *outgoing_frames_queue = &connection->thread_data.outgoing_frames_queue;
+ struct aws_linked_list *outgoing_streams_list = &connection->thread_data.outgoing_streams_list;
+
+ /* NOTE(review): presumably set once writing has permanently stopped (e.g. after a write
+ * error shutdown) — confirm; when set, queued frames are simply left in place. */
+ if (connection->thread_data.is_writing_stopped) {
+ return;
+ }
+
+ /* Determine whether there's work to do, and end task immediately if there's not.
+ * Note that we stop writing DATA frames if the channel is trying to shut down */
+ bool has_control_frames = !aws_linked_list_empty(outgoing_frames_queue);
+ bool has_data_frames = !aws_linked_list_empty(outgoing_streams_list);
+ bool may_write_data_frames = (connection->thread_data.window_size_peer > AWS_H2_MIN_WINDOW_SIZE) &&
+ !connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written;
+ bool will_write = has_control_frames || (has_data_frames && may_write_data_frames);
+
+ if (!will_write) {
+ if (!first_try) {
+ CONNECTION_LOGF(
+ TRACE,
+ connection,
+ "Outgoing frames task stopped. has_control_frames:%d has_data_frames:%d may_write_data_frames:%d",
+ has_control_frames,
+ has_data_frames,
+ may_write_data_frames);
+ }
+
+ /* Task goes dormant; aws_h2_try_write_outgoing_frames() restarts it when new work arrives. */
+ connection->thread_data.is_outgoing_frames_task_active = false;
+
+ /* Queue drained while a shutdown was waiting on the GOAWAY — finish shutting down now. */
+ if (connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written) {
+ s_finish_shutdown(connection);
+ }
+
+ return;
+ }
+
+ if (first_try) {
+ CONNECTION_LOG(TRACE, connection, "Starting outgoing frames task");
+ }
+
+ /* Acquire aws_io_message, that we will attempt to fill up */
+ struct aws_io_message *msg = aws_channel_slot_acquire_max_message_for_write(channel_slot);
+ if (AWS_UNLIKELY(!msg)) {
+ CONNECTION_LOG(ERROR, connection, "Failed to acquire message from pool, closing connection.");
+ goto error;
+ }
+
+ /* Set up callback so we can send another message when this one completes */
+ msg->on_completion = s_on_channel_write_complete;
+ msg->user_data = connection;
+
+ CONNECTION_LOGF(
+ TRACE,
+ connection,
+ "Outgoing frames task acquired message with %zu bytes available",
+ msg->message_data.capacity - msg->message_data.len);
+
+ /* Write as many frames from outgoing_frames_queue as possible. */
+ if (s_encode_outgoing_frames_queue(connection, &msg->message_data)) {
+ goto error;
+ }
+
+ /* If outgoing_frames_queue emptied, and connection is running normally,
+ * then write as many DATA frames from outgoing_streams_list as possible. */
+ if (aws_linked_list_empty(outgoing_frames_queue) && may_write_data_frames) {
+ if (s_encode_data_from_outgoing_streams(connection, &msg->message_data)) {
+ goto error;
+ }
+ }
+
+ if (msg->message_data.len) {
+ /* Write message to channel.
+ * outgoing_frames_task will resume when message completes. */
+ CONNECTION_LOGF(TRACE, connection, "Outgoing frames task sending message of size %zu", msg->message_data.len);
+
+ if (aws_channel_slot_send_message(channel_slot, msg, AWS_CHANNEL_DIR_WRITE)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Failed to send channel message: %s. Closing connection.",
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+ } else {
+ /* Message is empty, warn that no work is being done and reschedule the task to try again next tick.
+ * It's likely that body isn't ready, so body streaming function has no data to write yet.
+ * If this scenario turns out to be common we should implement a "pause" feature. */
+ CONNECTION_LOG(WARN, connection, "Outgoing frames task sent no data, will try again next tick.");
+
+ aws_mem_release(msg->allocator, msg);
+
+ aws_channel_schedule_task_now(channel_slot->channel, &connection->outgoing_frames_task);
+ }
+ return;
+
+error:;
+ int error_code = aws_last_error();
+
+ /* msg is NULL only when acquisition itself failed */
+ if (msg) {
+ aws_mem_release(msg->allocator, msg);
+ }
+
+ /* Any failure here tears the whole connection down */
+ aws_h2_connection_shutdown_due_to_write_err(connection, error_code);
+}
+
+/* Write as many frames from outgoing_frames_queue as possible (contains all non-DATA frames).
+ * Returns AWS_OP_SUCCESS even if frames remain because `output` filled up; AWS_OP_ERR only on
+ * an encoding failure. */
+static int s_encode_outgoing_frames_queue(struct aws_h2_connection *connection, struct aws_byte_buf *output) {
+
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ struct aws_linked_list *outgoing_frames_queue = &connection->thread_data.outgoing_frames_queue;
+
+ /* Write as many frames from outgoing_frames_queue as possible. */
+ while (!aws_linked_list_empty(outgoing_frames_queue)) {
+ struct aws_linked_list_node *frame_node = aws_linked_list_front(outgoing_frames_queue);
+ struct aws_h2_frame *frame = AWS_CONTAINER_OF(frame_node, struct aws_h2_frame, node);
+ /* Mark the frame in progress: aws_h2_connection_enqueue_outgoing_frame() uses this to avoid
+ * inserting a high-priority frame ahead of a partially-encoded one. */
+ connection->thread_data.current_outgoing_frame = frame;
+ bool frame_complete;
+ if (aws_h2_encode_frame(&connection->thread_data.encoder, frame, output, &frame_complete)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Error encoding frame: type=%s stream=%" PRIu32 " error=%s",
+ aws_h2_frame_type_to_str(frame->type),
+ frame->stream_id,
+ aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+
+ if (!frame_complete) {
+ if (output->len == 0) {
+ /* We're in trouble if an empty message isn't big enough for this frame to do any work with */
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Message is too small for encoder. frame-type=%s stream=%" PRIu32 " available-space=%zu",
+ aws_h2_frame_type_to_str(frame->type),
+ frame->stream_id,
+ output->capacity);
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ return AWS_OP_ERR;
+ }
+
+ /* Output filled: the partially-encoded frame stays at the head of the queue (and stays
+ * current_outgoing_frame) until the next message continues it. */
+ CONNECTION_LOG(TRACE, connection, "Outgoing frames task filled message, and has more frames to send later");
+ break;
+ }
+
+ /* Done encoding frame, pop from queue and cleanup*/
+ aws_linked_list_remove(frame_node);
+ aws_h2_frame_destroy(frame);
+ connection->thread_data.current_outgoing_frame = NULL;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Write as many DATA frames from outgoing_streams_list as possible.
+ * Streams are serviced round-robin; streams that cannot make progress are parked in side
+ * lists (stalled body, waiting-for-writes, window-stalled) so they aren't polled again. */
+static int s_encode_data_from_outgoing_streams(struct aws_h2_connection *connection, struct aws_byte_buf *output) {
+
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ struct aws_linked_list *outgoing_streams_list = &connection->thread_data.outgoing_streams_list;
+ if (aws_linked_list_empty(outgoing_streams_list)) {
+ return AWS_OP_SUCCESS;
+ }
+ struct aws_linked_list *stalled_window_streams_list = &connection->thread_data.stalled_window_streams_list;
+ struct aws_linked_list *waiting_streams_list = &connection->thread_data.waiting_streams_list;
+
+ /* If a stream stalls, put it in this list until the function ends so we don't keep trying to read from it.
+ * We put it back at the end of function. */
+ struct aws_linked_list stalled_streams_list;
+ aws_linked_list_init(&stalled_streams_list);
+
+ int aws_error_code = 0;
+
+ /* We simply round-robin through streams, instead of using stream priority.
+ * Respecting priority is not required (RFC-7540 5.3), so we're ignoring it for now. This also keeps us safe
+ * from priority DOS attacks: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9513 */
+ while (!aws_linked_list_empty(outgoing_streams_list)) {
+ /* Connection-level send window exhausted: no DATA at all until the peer sends WINDOW_UPDATE */
+ if (connection->thread_data.window_size_peer <= AWS_H2_MIN_WINDOW_SIZE) {
+ CONNECTION_LOGF(
+ DEBUG,
+ connection,
+ "Peer connection's flow-control window is too small now %zu. Connection will stop sending DATA until "
+ "WINDOW_UPDATE is received.",
+ connection->thread_data.window_size_peer);
+ goto done;
+ }
+
+ /* Stop looping if message is so full it's not worth the bother */
+ size_t space_available = output->capacity - output->len;
+ size_t worth_trying_threshold = AWS_H2_FRAME_PREFIX_SIZE * 2;
+ if (space_available < worth_trying_threshold) {
+ CONNECTION_LOG(TRACE, connection, "Outgoing frames task filled message, and has more frames to send later");
+ goto done;
+ }
+
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(outgoing_streams_list);
+ struct aws_h2_stream *stream = AWS_CONTAINER_OF(node, struct aws_h2_stream, node);
+
+ /* Ask stream to encode a data frame.
+ * Stream may complete itself as a result of encoding its data,
+ * in which case it will vanish from the connection's datastructures as a side-effect of this call.
+ * But if stream has more data to send, push it back into the appropriate list. */
+ int data_encode_status;
+ if (aws_h2_stream_encode_data_frame(stream, &connection->thread_data.encoder, output, &data_encode_status)) {
+
+ aws_error_code = aws_last_error();
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Connection error while encoding DATA on stream %" PRIu32 ", %s",
+ stream->base.id,
+ aws_error_name(aws_error_code));
+ goto done;
+ }
+
+ /* If stream has more data, push it into the appropriate list. */
+ switch (data_encode_status) {
+ case AWS_H2_DATA_ENCODE_COMPLETE:
+ /* All data sent; stream is not re-queued into any outgoing list */
+ break;
+ case AWS_H2_DATA_ENCODE_ONGOING:
+ aws_linked_list_push_back(outgoing_streams_list, node);
+ break;
+ case AWS_H2_DATA_ENCODE_ONGOING_BODY_STREAM_STALLED:
+ aws_linked_list_push_back(&stalled_streams_list, node);
+ break;
+ case AWS_H2_DATA_ENCODE_ONGOING_WAITING_FOR_WRITES:
+ stream->thread_data.waiting_for_writes = true;
+ aws_linked_list_push_back(waiting_streams_list, node);
+ break;
+ case AWS_H2_DATA_ENCODE_ONGOING_WINDOW_STALLED:
+ aws_linked_list_push_back(stalled_window_streams_list, node);
+ AWS_H2_STREAM_LOG(
+ DEBUG,
+ stream,
+ "Peer stream's flow-control window is too small. Data frames on this stream will not be sent until "
+ "WINDOW_UPDATE. ");
+ break;
+ default:
+ CONNECTION_LOG(ERROR, connection, "Data encode status is invalid.");
+ aws_error_code = AWS_ERROR_INVALID_STATE;
+ }
+ }
+
+done:
+ /* Return any stalled streams to outgoing_streams_list */
+ while (!aws_linked_list_empty(&stalled_streams_list)) {
+ aws_linked_list_push_back(outgoing_streams_list, aws_linked_list_pop_front(&stalled_streams_list));
+ }
+
+ if (aws_error_code) {
+ return aws_raise_error(aws_error_code);
+ }
+
+ if (aws_linked_list_empty(outgoing_streams_list)) {
+ /* transition from something to write -> nothing to write */
+ uint64_t now_ns = 0;
+ aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns);
+ s_add_time_measurement_to_stats(
+ connection->thread_data.outgoing_timestamp_ns,
+ now_ns,
+ &connection->thread_data.stats.pending_outgoing_stream_ms);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Kick off the outgoing-frames machinery, unless it is already active. */
+void aws_h2_try_write_outgoing_frames(struct aws_h2_connection *connection) {
+    AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+    if (!connection->thread_data.is_outgoing_frames_task_active) {
+        connection->thread_data.is_outgoing_frames_task_active = true;
+        s_write_outgoing_frames(connection, true /*first_try*/);
+    }
+}
+
+/**
+ * Returns successfully and sets `out_stream` if stream is currently active.
+ * Returns successfully and sets `out_stream` to NULL if the frame should be ignored.
+ * Returns failed aws_h2err if it is a connection error to receive this frame.
+ */
+struct aws_h2err s_get_active_stream_for_incoming_frame(
+    struct aws_h2_connection *connection,
+    uint32_t stream_id,
+    enum aws_h2_frame_type frame_type,
+    struct aws_h2_stream **out_stream) {
+
+    *out_stream = NULL;
+
+    /* Check active streams */
+    struct aws_hash_element *found = NULL;
+    const void *stream_id_key = (void *)(size_t)stream_id;
+    aws_hash_table_find(&connection->thread_data.active_streams_map, stream_id_key, &found);
+    if (found) {
+        /* Found it! return */
+        *out_stream = found->value;
+        return AWS_H2ERR_SUCCESS;
+    }
+
+    /* Odd stream-ids are client-initiated (RFC-7540 5.1.1) */
+    bool client_initiated = (stream_id % 2) == 1;
+    bool self_initiated_stream = client_initiated && (connection->base.client_data != NULL);
+    bool peer_initiated_stream = !self_initiated_stream;
+
+    if ((self_initiated_stream && stream_id >= connection->base.next_stream_id) ||
+        (peer_initiated_stream && stream_id > connection->thread_data.latest_peer_initiated_stream_id)) {
+        /* Illegal to receive frames for a stream in the idle state (stream doesn't exist yet)
+         * (except server receiving HEADERS to start a stream, but that's handled elsewhere) */
+        CONNECTION_LOGF(
+            ERROR,
+            connection,
+            "Illegal to receive %s frame on stream id=%" PRIu32 " state=IDLE",
+            aws_h2_frame_type_to_str(frame_type),
+            stream_id);
+        return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+    }
+
+    if (peer_initiated_stream && stream_id > connection->thread_data.goaway_sent_last_stream_id) {
+        /* Once GOAWAY sent, ignore frames for peer-initiated streams whose id > last-stream-id */
+        CONNECTION_LOGF(
+            TRACE,
+            connection,
+            "Ignoring %s frame on stream id=%" PRIu32 " because GOAWAY sent with last-stream-id=%" PRIu32,
+            aws_h2_frame_type_to_str(frame_type),
+            stream_id,
+            connection->thread_data.goaway_sent_last_stream_id);
+
+        return AWS_H2ERR_SUCCESS;
+    }
+
+    void *cached_value = NULL;
+    /* Stream is closed, check whether it's legal for a few more frames to trickle in */
+    if (aws_cache_find(connection->thread_data.closed_streams, stream_id_key, &cached_value)) {
+        return aws_h2err_from_last_error();
+    }
+    if (cached_value) {
+        if (frame_type == AWS_H2_FRAME_T_PRIORITY) {
+            /* If we support PRIORITY, do something here. Right now just ignore it */
+            return AWS_H2ERR_SUCCESS;
+        }
+        enum aws_h2_stream_closed_when closed_when = (enum aws_h2_stream_closed_when)(size_t)cached_value;
+        switch (closed_when) {
+            case AWS_H2_STREAM_CLOSED_WHEN_BOTH_SIDES_END_STREAM:
+                /* WINDOW_UPDATE or RST_STREAM frames can be received ... for a short period after
+                 * a DATA or HEADERS frame containing an END_STREAM flag is sent.
+                 * Endpoints MUST ignore WINDOW_UPDATE or RST_STREAM frames received in this state */
+                if (frame_type == AWS_H2_FRAME_T_WINDOW_UPDATE || frame_type == AWS_H2_FRAME_T_RST_STREAM) {
+                    CONNECTION_LOGF(
+                        TRACE,
+                        connection,
+                        "Ignoring %s frame on stream id=%" PRIu32 " because END_STREAM flag was recently sent.",
+                        aws_h2_frame_type_to_str(frame_type),
+                        stream_id);
+
+                    return AWS_H2ERR_SUCCESS;
+                } else {
+                    CONNECTION_LOGF(
+                        ERROR,
+                        connection,
+                        "Illegal to receive %s frame on stream id=%" PRIu32 " after END_STREAM has been received.",
+                        aws_h2_frame_type_to_str(frame_type),
+                        stream_id);
+
+                    return aws_h2err_from_h2_code(AWS_HTTP2_ERR_STREAM_CLOSED);
+                }
+            case AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_RECEIVED: {
+                /* An endpoint that receives any frame other than PRIORITY after receiving a RST_STREAM
+                 * MUST treat that as a stream error (Section 5.4.2) of type STREAM_CLOSED */
+                CONNECTION_LOGF(
+                    ERROR,
+                    connection,
+                    "Illegal to receive %s frame on stream id=%" PRIu32 " after RST_STREAM has been received",
+                    aws_h2_frame_type_to_str(frame_type),
+                    stream_id);
+                struct aws_h2_frame *rst_stream =
+                    aws_h2_frame_new_rst_stream(connection->base.alloc, stream_id, AWS_HTTP2_ERR_STREAM_CLOSED);
+                if (!rst_stream) {
+                    CONNECTION_LOGF(
+                        ERROR, connection, "Error creating RST_STREAM frame, %s", aws_error_name(aws_last_error()));
+                    return aws_h2err_from_last_error();
+                }
+                aws_h2_connection_enqueue_outgoing_frame(connection, rst_stream);
+                return AWS_H2ERR_SUCCESS;
+            }
+            case AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_SENT:
+                /* An endpoint MUST ignore frames that it receives on closed streams after it has sent a RST_STREAM
+                 * frame */
+                CONNECTION_LOGF(
+                    TRACE,
+                    connection,
+                    "Ignoring %s frame on stream id=%" PRIu32 " because RST_STREAM was recently sent.",
+                    aws_h2_frame_type_to_str(frame_type),
+                    stream_id);
+
+                return AWS_H2ERR_SUCCESS;
+            default:
+                CONNECTION_LOGF(
+                    ERROR, connection, "Invalid state for cached closed stream, stream id=%" PRIu32, stream_id);
+                return aws_h2err_from_h2_code(AWS_HTTP2_ERR_INTERNAL_ERROR);
+        }
+    }
+    if (frame_type == AWS_H2_FRAME_T_PRIORITY) {
+        /* ignored if the stream has been removed from the dependency tree */
+        return AWS_H2ERR_SUCCESS;
+    }
+
+    /* Stream closed (purged from closed_streams, or implicitly closed when its ID was skipped) */
+    CONNECTION_LOGF(
+        ERROR,
+        connection,
+        "Illegal to receive %s frame on stream id=%" PRIu32
+        ", no memory of closed stream (ID skipped, or removed from cache)",
+        aws_h2_frame_type_to_str(frame_type),
+        stream_id);
+
+    return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+}
+
+/* Decoder callbacks */
+
+struct aws_h2err s_decoder_on_headers_begin(uint32_t stream_id, void *userdata) {
+    struct aws_h2_connection *connection = userdata;
+
+    if (connection->base.server_data) {
+        /* Server would create new request-handler stream... */
+        return aws_h2err_from_aws_code(AWS_ERROR_UNIMPLEMENTED);
+    }
+
+    struct aws_h2_stream *stream = NULL;
+    struct aws_h2err result =
+        s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_HEADERS, &stream);
+    if (aws_h2err_failed(result)) {
+        return result;
+    }
+
+    /* A NULL stream means this frame should be ignored */
+    if (stream != NULL) {
+        result = aws_h2_stream_on_decoder_headers_begin(stream);
+        if (aws_h2err_failed(result)) {
+            return result;
+        }
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err s_decoder_on_headers_i(
+    uint32_t stream_id,
+    const struct aws_http_header *header,
+    enum aws_http_header_name name_enum,
+    enum aws_http_header_block block_type,
+    void *userdata) {
+
+    struct aws_h2_connection *connection = userdata;
+
+    struct aws_h2_stream *stream = NULL;
+    struct aws_h2err result =
+        s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_HEADERS, &stream);
+    if (aws_h2err_failed(result)) {
+        return result;
+    }
+
+    /* A NULL stream means this frame should be ignored */
+    if (stream != NULL) {
+        result = aws_h2_stream_on_decoder_headers_i(stream, header, name_enum, block_type);
+        if (aws_h2err_failed(result)) {
+            return result;
+        }
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err s_decoder_on_headers_end(
+    uint32_t stream_id,
+    bool malformed,
+    enum aws_http_header_block block_type,
+    void *userdata) {
+
+    struct aws_h2_connection *connection = userdata;
+
+    struct aws_h2_stream *stream = NULL;
+    struct aws_h2err result =
+        s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_HEADERS, &stream);
+    if (aws_h2err_failed(result)) {
+        return result;
+    }
+
+    /* A NULL stream means this frame should be ignored */
+    if (stream != NULL) {
+        result = aws_h2_stream_on_decoder_headers_end(stream, malformed, block_type);
+        if (aws_h2err_failed(result)) {
+            return result;
+        }
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err s_decoder_on_push_promise(uint32_t stream_id, uint32_t promised_stream_id, void *userdata) {
+    struct aws_h2_connection *connection = userdata;
+    AWS_ASSERT(connection->base.client_data); /* decoder has already enforced this */
+    AWS_ASSERT(promised_stream_id % 2 == 0);  /* decoder has already enforced this */
+
+    /* The identifier of a newly established stream MUST be numerically greater
+     * than all streams that the initiating endpoint has opened or reserved (RFC-7540 5.1.1) */
+    if (promised_stream_id <= connection->thread_data.latest_peer_initiated_stream_id) {
+        CONNECTION_LOGF(
+            ERROR,
+            connection,
+            "Newly promised stream ID %" PRIu32 " must be higher than previously established ID %" PRIu32,
+            promised_stream_id,
+            connection->thread_data.latest_peer_initiated_stream_id);
+        return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+    }
+    connection->thread_data.latest_peer_initiated_stream_id = promised_stream_id;
+
+    /* If we ever fully support PUSH_PROMISE, this is where we'd add the
+     * promised_stream_id to some reserved_streams datastructure */
+
+    struct aws_h2_stream *stream = NULL;
+    struct aws_h2err result =
+        s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_PUSH_PROMISE, &stream);
+    if (aws_h2err_failed(result)) {
+        return result;
+    }
+
+    /* A NULL stream means this frame should be ignored */
+    if (stream != NULL) {
+        result = aws_h2_stream_on_decoder_push_promise(stream, promised_stream_id);
+        if (aws_h2err_failed(result)) {
+            return result;
+        }
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Queue a connection-level WINDOW_UPDATE (stream 0) and grow our own receive window to match. */
+static int s_connection_send_update_window(struct aws_h2_connection *connection, uint32_t window_size) {
+    struct aws_h2_frame *update_frame =
+        aws_h2_frame_new_window_update(connection->base.alloc, 0 /*stream_id: connection-level*/, window_size);
+    if (update_frame == NULL) {
+        CONNECTION_LOGF(
+            ERROR,
+            connection,
+            "WINDOW_UPDATE frame on connection failed to be sent, error %s",
+            aws_error_name(aws_last_error()));
+        return AWS_OP_ERR;
+    }
+    aws_h2_connection_enqueue_outgoing_frame(connection, update_frame);
+    connection->thread_data.window_size_self += window_size;
+    return AWS_OP_SUCCESS;
+}
+
+/* Decoder callback: start of a DATA frame.
+ * Charges the whole payload against the connection-level receive window, forwards the event
+ * to the stream (if still active), then queues any automatic connection WINDOW_UPDATE. */
+struct aws_h2err s_decoder_on_data_begin(
+ uint32_t stream_id,
+ uint32_t payload_len,
+ uint32_t total_padding_bytes,
+ bool end_stream,
+ void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+
+ /* A receiver that receives a flow-controlled frame MUST always account for its contribution against the connection
+ * flow-control window, unless the receiver treats this as a connection error */
+ /* aws_sub_size_checked() fails when payload_len > window_size_self, i.e. the peer overflowed our window */
+ if (aws_sub_size_checked(
+ connection->thread_data.window_size_self, payload_len, &connection->thread_data.window_size_self)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "DATA length %" PRIu32 " exceeds flow-control window %zu",
+ payload_len,
+ connection->thread_data.window_size_self);
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR);
+ }
+
+ struct aws_h2_stream *stream;
+ struct aws_h2err err = s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_DATA, &stream);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+
+ /* NULL stream means this frame should be ignored (window accounting above still applies) */
+ if (stream) {
+ err = aws_h2_stream_on_decoder_data_begin(stream, payload_len, total_padding_bytes, end_stream);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+ }
+ /* Handle automatic updates of the connection flow window */
+ uint32_t auto_window_update;
+ if (connection->conn_manual_window_management) {
+ /* Automatically update the flow-window to account for padding, even though "manual window management"
+ * is enabled. We do this because the current API doesn't have any way to inform the user about padding,
+ * so we can't expect them to manage it themselves. */
+ auto_window_update = total_padding_bytes;
+ } else {
+ /* Automatically update the full amount we just received */
+ auto_window_update = payload_len;
+ }
+
+ if (auto_window_update != 0) {
+ if (s_connection_send_update_window(connection, auto_window_update)) {
+ return aws_h2err_from_last_error();
+ }
+ CONNECTION_LOGF(
+ TRACE,
+ connection,
+ "Automatically updating connection window by %" PRIu32 "(%" PRIu32 " due to padding).",
+ auto_window_update,
+ total_padding_bytes);
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err s_decoder_on_data_i(uint32_t stream_id, struct aws_byte_cursor data, void *userdata) {
+    struct aws_h2_connection *connection = userdata;
+
+    /* Hand this chunk of DATA payload to the stream, unless the frame is being ignored */
+    struct aws_h2_stream *stream = NULL;
+    struct aws_h2err result =
+        s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_DATA, &stream);
+    if (aws_h2err_failed(result)) {
+        return result;
+    }
+
+    if (stream != NULL) {
+        result = aws_h2_stream_on_decoder_data_i(stream, data);
+        if (aws_h2err_failed(result)) {
+            return result;
+        }
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err s_decoder_on_end_stream(uint32_t stream_id, void *userdata) {
+    struct aws_h2_connection *connection = userdata;
+
+    /* Not calling s_get_active_stream_for_incoming_frame() here because END_STREAM
+     * isn't an actual frame type. It's a flag on DATA or HEADERS frames, and we
+     * already checked the legality of those frames in their respective callbacks. */
+
+    struct aws_hash_element *elem = NULL;
+    aws_hash_table_find(&connection->thread_data.active_streams_map, (void *)(size_t)stream_id, &elem);
+    if (elem != NULL) {
+        struct aws_h2_stream *stream = elem->value;
+        struct aws_h2err stream_err = aws_h2_stream_on_decoder_end_stream(stream);
+        if (aws_h2err_failed(stream_err)) {
+            return stream_err;
+        }
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+static struct aws_h2err s_decoder_on_rst_stream(uint32_t stream_id, uint32_t h2_error_code, void *userdata) {
+    struct aws_h2_connection *connection = userdata;
+
+    /* Deliver the RST_STREAM to the stream, unless the frame is being ignored */
+    struct aws_h2_stream *stream = NULL;
+    struct aws_h2err result =
+        s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_RST_STREAM, &stream);
+    if (aws_h2err_failed(result)) {
+        return result;
+    }
+
+    if (stream != NULL) {
+        result = aws_h2_stream_on_decoder_rst_stream(stream, h2_error_code);
+        if (aws_h2err_failed(result)) {
+            return result;
+        }
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Decoder callback: PING frame with the ACK flag set.
+ * Pops the oldest pending PING, requires its opaque payload to match, computes the round-trip
+ * time, and fires the pending PING's completion callback (with error + rtt 0 on failure). */
+static struct aws_h2err s_decoder_on_ping_ack(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+ if (aws_linked_list_empty(&connection->thread_data.pending_ping_queue)) {
+ CONNECTION_LOG(ERROR, connection, "Received extraneous PING ACK.");
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+ struct aws_h2err err;
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.pending_ping_queue);
+ struct aws_h2_pending_ping *pending_ping = AWS_CONTAINER_OF(node, struct aws_h2_pending_ping, node);
+ /* Check the payload */
+ if (!aws_array_eq(opaque_data, AWS_HTTP2_PING_DATA_SIZE, pending_ping->opaque_data, AWS_HTTP2_PING_DATA_SIZE)) {
+ CONNECTION_LOG(ERROR, connection, "Received PING ACK with mismatched opaque-data.");
+ err = aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ goto error;
+ }
+ uint64_t time_stamp;
+ if (aws_high_res_clock_get_ticks(&time_stamp)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Failed getting the time stamp when PING ACK received, error %s",
+ aws_error_name(aws_last_error()));
+ err = aws_h2err_from_last_error();
+ goto error;
+ }
+ uint64_t rtt;
+ if (aws_sub_u64_checked(time_stamp, pending_ping->started_time, &rtt)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Overflow from time stamp when PING ACK received, error %s",
+ aws_error_name(aws_last_error()));
+ err = aws_h2err_from_last_error();
+ goto error;
+ }
+ /* rtt is a high-res clock delta; the log below treats it as nanoseconds when converting to ms */
+ CONNECTION_LOGF(TRACE, connection, "Round trip time is %lf ms, approximately", (double)rtt / 1000000);
+ /* fire the callback */
+ if (pending_ping->on_completed) {
+ pending_ping->on_completed(&connection->base, rtt, AWS_ERROR_SUCCESS, pending_ping->user_data);
+ }
+ aws_mem_release(connection->base.alloc, pending_ping);
+ return AWS_H2ERR_SUCCESS;
+error:
+ /* Failure path still completes the user callback (with rtt reported as 0) before freeing */
+ if (pending_ping->on_completed) {
+ pending_ping->on_completed(&connection->base, 0 /* fake rtt */, err.aws_code, pending_ping->user_data);
+ }
+ aws_mem_release(connection->base.alloc, pending_ping);
+ return err;
+}
+
+static struct aws_h2err s_decoder_on_ping(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata) {
+    struct aws_h2_connection *connection = userdata;
+
+    /* send a PING frame with the ACK flag set in response, with an identical payload. */
+    struct aws_h2_frame *ack = aws_h2_frame_new_ping(connection->base.alloc, true /*ack*/, opaque_data);
+    if (ack == NULL) {
+        CONNECTION_LOGF(
+            ERROR, connection, "Ping ACK frame failed to be sent, error %s", aws_error_name(aws_last_error()));
+        return aws_h2err_from_last_error();
+    }
+
+    aws_h2_connection_enqueue_outgoing_frame(connection, ack);
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Decoder callback: SETTINGS frame (without ACK flag) from the peer.
+ * Immediately queues a SETTINGS ACK (RFC-7540 6.5.3), applies each changed value to the
+ * encoder/connection state, reports the changed subset via on_remote_settings_change,
+ * and mirrors the new settings table into synced_data. */
+static struct aws_h2err s_decoder_on_settings(
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+ struct aws_h2err err;
+ /* Once all values have been processed, the recipient MUST immediately emit a SETTINGS frame with the ACK flag
+ * set.(RFC-7540 6.5.3) */
+ CONNECTION_LOG(TRACE, connection, "Setting frame processing ends");
+ struct aws_h2_frame *settings_ack_frame = aws_h2_frame_new_settings(connection->base.alloc, NULL, 0, true);
+ if (!settings_ack_frame) {
+ CONNECTION_LOGF(
+ ERROR, connection, "Settings ACK frame failed to be sent, error %s", aws_error_name(aws_last_error()));
+ return aws_h2err_from_last_error();
+ }
+ aws_h2_connection_enqueue_outgoing_frame(connection, settings_ack_frame);
+
+ /* Allocate a block of memory for settings_array in callback, which will only includes the settings we changed,
+ * freed once the callback finished */
+ struct aws_http2_setting *callback_array = NULL;
+ if (num_settings) {
+ callback_array = aws_mem_acquire(connection->base.alloc, num_settings * sizeof(struct aws_http2_setting));
+ if (!callback_array) {
+ return aws_h2err_from_last_error();
+ }
+ }
+ size_t callback_array_num = 0;
+
+ /* Apply the change to encoder and connection */
+ struct aws_h2_frame_encoder *encoder = &connection->thread_data.encoder;
+ for (size_t i = 0; i < num_settings; i++) {
+ /* NOTE(review): settings_array[i].id is used as a direct index into settings_peer[] —
+ * presumably the decoder has already dropped unknown ids; confirm bounds are enforced upstream. */
+ if (connection->thread_data.settings_peer[settings_array[i].id] == settings_array[i].value) {
+ /* No change, don't do any work */
+ continue;
+ }
+ switch (settings_array[i].id) {
+ case AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE: {
+ aws_h2_frame_encoder_set_setting_header_table_size(encoder, settings_array[i].value);
+ } break;
+ case AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE: {
+ /* When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust the size of all stream
+ * flow-control windows that it maintains by the difference between the new value and the old value. */
+ int32_t size_changed =
+ settings_array[i].value - connection->thread_data.settings_peer[settings_array[i].id];
+ struct aws_hash_iter stream_iter = aws_hash_iter_begin(&connection->thread_data.active_streams_map);
+ while (!aws_hash_iter_done(&stream_iter)) {
+ struct aws_h2_stream *stream = stream_iter.element.value;
+ aws_hash_iter_next(&stream_iter);
+ err = aws_h2_stream_window_size_change(stream, size_changed, false /*self*/);
+ if (aws_h2err_failed(err)) {
+ CONNECTION_LOG(
+ ERROR,
+ connection,
+ "Connection error, change to SETTINGS_INITIAL_WINDOW_SIZE caused a stream's flow-control "
+ "window to exceed the maximum size");
+ goto error;
+ }
+ }
+ } break;
+ case AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE: {
+ aws_h2_frame_encoder_set_setting_max_frame_size(encoder, settings_array[i].value);
+ } break;
+ default:
+ break;
+ }
+ connection->thread_data.settings_peer[settings_array[i].id] = settings_array[i].value;
+ /* Record only the settings that actually changed, for the user callback below */
+ callback_array[callback_array_num++] = settings_array[i];
+ }
+ if (connection->on_remote_settings_change) {
+ connection->on_remote_settings_change(
+ &connection->base, callback_array, callback_array_num, connection->base.user_data);
+ }
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+
+ memcpy(
+ connection->synced_data.settings_peer,
+ connection->thread_data.settings_peer,
+ sizeof(connection->thread_data.settings_peer));
+
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ aws_mem_release(connection->base.alloc, callback_array);
+ return AWS_H2ERR_SUCCESS;
+error:
+ aws_mem_release(connection->base.alloc, callback_array);
+ return err;
+}
+
/* Decoder callback: peer acknowledged a SETTINGS frame we sent.
 * Pops the oldest entry from thread_data.pending_settings_queue (ACKs arrive in
 * the order SETTINGS were sent), applies those values to our local ("self")
 * settings and the decoder, mirrors the result into synced_data for off-thread
 * readers, invokes the user's on_completed callback, and frees the entry.
 * Returns AWS_H2ERR_SUCCESS, or a connection error if a window-size adjustment
 * overflows a stream's flow-control window. */
static struct aws_h2err s_decoder_on_settings_ack(void *userdata) {
    struct aws_h2_connection *connection = userdata;
    /* An ACK with nothing outstanding is a protocol violation (RFC-7540 6.5.3). */
    if (aws_linked_list_empty(&connection->thread_data.pending_settings_queue)) {
        CONNECTION_LOG(ERROR, connection, "Received a malicious extra SETTINGS acknowledgment");
        return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
    }
    struct aws_h2err err;
    struct aws_h2_pending_settings *pending_settings = NULL;
    struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.pending_settings_queue);
    pending_settings = AWS_CONTAINER_OF(node, struct aws_h2_pending_settings, node);

    struct aws_http2_setting *settings_array = pending_settings->settings_array;
    /* Apply the settings */
    struct aws_h2_decoder *decoder = connection->thread_data.decoder;
    for (size_t i = 0; i < pending_settings->num_settings; i++) {
        if (connection->thread_data.settings_self[settings_array[i].id] == settings_array[i].value) {
            /* No change, don't do any work */
            continue;
        }
        switch (settings_array[i].id) {
            case AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE: {
                aws_h2_decoder_set_setting_header_table_size(decoder, settings_array[i].value);
            } break;
            case AWS_HTTP2_SETTINGS_ENABLE_PUSH: {
                aws_h2_decoder_set_setting_enable_push(decoder, settings_array[i].value);
            } break;
            case AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE: {
                /* When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust the size of all stream
                 * flow-control windows that it maintains by the difference between the new value and the old value. */
                int32_t size_changed =
                    settings_array[i].value - connection->thread_data.settings_self[settings_array[i].id];
                struct aws_hash_iter stream_iter = aws_hash_iter_begin(&connection->thread_data.active_streams_map);
                while (!aws_hash_iter_done(&stream_iter)) {
                    struct aws_h2_stream *stream = stream_iter.element.value;
                    /* Advance before mutating the stream, so iteration stays valid. */
                    aws_hash_iter_next(&stream_iter);
                    err = aws_h2_stream_window_size_change(stream, size_changed, true /*self*/);
                    if (aws_h2err_failed(err)) {
                        CONNECTION_LOG(
                            ERROR,
                            connection,
                            "Connection error, change to SETTINGS_INITIAL_WINDOW_SIZE from internal caused a stream's "
                            "flow-control window to exceed the maximum size");
                        goto error;
                    }
                }
            } break;
            case AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE: {
                aws_h2_decoder_set_setting_max_frame_size(decoder, settings_array[i].value);
            } break;
            default:
                break;
        }
        connection->thread_data.settings_self[settings_array[i].id] = settings_array[i].value;
    }
    /* invoke the change-settings-completed user callback */
    if (pending_settings->on_completed) {
        pending_settings->on_completed(&connection->base, AWS_ERROR_SUCCESS, pending_settings->user_data);
    }
    { /* BEGIN CRITICAL SECTION */
        s_lock_synced_data(connection);

        /* Publish the fully-applied settings for aws_http2_connection_get_local_settings() et al. */
        memcpy(
            connection->synced_data.settings_self,
            connection->thread_data.settings_self,
            sizeof(connection->thread_data.settings_self));

        s_unlock_synced_data(connection);
    } /* END CRITICAL SECTION */
    /* clean up the pending_settings */
    aws_mem_release(connection->base.alloc, pending_settings);
    return AWS_H2ERR_SUCCESS;
error:
    /* invoke the user callback with error code */
    if (pending_settings->on_completed) {
        pending_settings->on_completed(&connection->base, err.aws_code, pending_settings->user_data);
    }
    /* clean up the pending settings here */
    aws_mem_release(connection->base.alloc, pending_settings);
    return err;
}
+
/* Decoder callback: received a WINDOW_UPDATE frame.
 * stream_id 0 grows the connection-level peer flow-control window; any other id
 * is routed to that stream (if still active). A stream whose window comes back
 * to positive is moved onto outgoing_streams_list so it resumes sending DATA. */
static struct aws_h2err s_decoder_on_window_update(uint32_t stream_id, uint32_t window_size_increment, void *userdata) {
    struct aws_h2_connection *connection = userdata;

    if (stream_id == 0) {
        /* Let's update the connection flow-control window size */
        if (window_size_increment == 0) {
            /* flow-control window increment of 0 MUST be treated as error (RFC7540 6.9.1) */
            CONNECTION_LOG(ERROR, connection, "Window update frame with 0 increment size");
            return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
        }
        if (connection->thread_data.window_size_peer + window_size_increment > AWS_H2_WINDOW_UPDATE_MAX) {
            /* We MUST NOT allow a flow-control window to exceed the max */
            CONNECTION_LOG(
                ERROR,
                connection,
                "Window update frame causes the connection flow-control window exceeding the maximum size");
            return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR);
        }
        /* Log only on the transition out of the "too small to send" state. */
        if (connection->thread_data.window_size_peer <= AWS_H2_MIN_WINDOW_SIZE) {
            CONNECTION_LOGF(
                DEBUG,
                connection,
                "Peer connection's flow-control window is resumed from too small to %" PRIu32
                ". Connection will resume sending DATA.",
                window_size_increment);
        }
        connection->thread_data.window_size_peer += window_size_increment;
        return AWS_H2ERR_SUCCESS;
    } else {
        /* Update the flow-control window size for stream */
        struct aws_h2_stream *stream;
        bool window_resume;
        /* stream may come back NULL (e.g. recently closed); that's not an error here. */
        struct aws_h2err err =
            s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_WINDOW_UPDATE, &stream);
        if (aws_h2err_failed(err)) {
            return err;
        }
        if (stream) {
            err = aws_h2_stream_on_decoder_window_update(stream, window_size_increment, &window_resume);
            if (aws_h2err_failed(err)) {
                return err;
            }
            if (window_resume) {
                /* Set the stream free from stalled list */
                AWS_H2_STREAM_LOGF(
                    DEBUG,
                    stream,
                    "Peer stream's flow-control window is resumed from 0 or negative to %" PRIu32
                    " Stream will resume sending data.",
                    stream->thread_data.window_size_peer);
                aws_linked_list_remove(&stream->node);
                aws_linked_list_push_back(&connection->thread_data.outgoing_streams_list, &stream->node);
            }
        }
    }
    return AWS_H2ERR_SUCCESS;
}
+
+struct aws_h2err s_decoder_on_goaway(
+ uint32_t last_stream,
+ uint32_t error_code,
+ struct aws_byte_cursor debug_data,
+ void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+
+ if (last_stream > connection->thread_data.goaway_received_last_stream_id) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Received GOAWAY with invalid last-stream-id=%" PRIu32 ", must not exceed previous last-stream-id=%" PRIu32,
+ last_stream,
+ connection->thread_data.goaway_received_last_stream_id);
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+ /* stop sending any new stream and making new request */
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+
+ connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_GOAWAY_RECEIVED;
+ connection->synced_data.goaway_received_last_stream_id = last_stream;
+ connection->synced_data.goaway_received_http2_error_code = error_code;
+
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ connection->thread_data.goaway_received_last_stream_id = last_stream;
+ CONNECTION_LOGF(
+ DEBUG,
+ connection,
+ "Received GOAWAY error-code=%s(0x%x) last-stream-id=%" PRIu32,
+ aws_http2_error_code_to_str(error_code),
+ error_code,
+ last_stream);
+ /* Complete activated streams whose id is higher than last_stream, since they will not process by peer. We should
+ * treat them as they had never been created at all.
+ * This would be more efficient if we could iterate streams in reverse-id order */
+ struct aws_hash_iter stream_iter = aws_hash_iter_begin(&connection->thread_data.active_streams_map);
+ while (!aws_hash_iter_done(&stream_iter)) {
+ struct aws_h2_stream *stream = stream_iter.element.value;
+ aws_hash_iter_next(&stream_iter);
+ if (stream->base.id > last_stream) {
+ AWS_H2_STREAM_LOG(
+ DEBUG,
+ stream,
+ "stream ID is higher than GOAWAY last stream ID, please retry this stream on a new connection.");
+ s_stream_complete(connection, stream, AWS_ERROR_HTTP_GOAWAY_RECEIVED);
+ }
+ }
+ if (connection->on_goaway_received) {
+ /* Inform user about goaway received and the error code. */
+ connection->on_goaway_received(
+ &connection->base, last_stream, error_code, debug_data, connection->base.user_data);
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+/* End decoder callbacks */
+
+static int s_send_connection_preface_client_string(struct aws_h2_connection *connection) {
+
+ /* Just send the magic string on its own aws_io_message. */
+ struct aws_io_message *msg = aws_channel_acquire_message_from_pool(
+ connection->base.channel_slot->channel,
+ AWS_IO_MESSAGE_APPLICATION_DATA,
+ aws_h2_connection_preface_client_string.len);
+ if (!msg) {
+ goto error;
+ }
+
+ if (!aws_byte_buf_write_from_whole_cursor(&msg->message_data, aws_h2_connection_preface_client_string)) {
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto error;
+ }
+
+ if (aws_channel_slot_send_message(connection->base.channel_slot, msg, AWS_CHANNEL_DIR_WRITE)) {
+ goto error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ if (msg) {
+ aws_mem_release(msg->allocator, msg);
+ }
+ return AWS_OP_ERR;
+}
+
/* Channel-handler vtable callback: the handler has been installed in a slot.
 * Holds the channel alive until the user releases the connection, then queues
 * the HTTP/2 connection preface (RFC-7540 3.5): magic string (clients only),
 * the initial SETTINGS frame, and — unless manual connection window management
 * is enabled — a WINDOW_UPDATE growing the connection window to max. */
static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot) {
    AWS_PRECONDITION(aws_channel_thread_is_callers_thread(slot->channel));
    struct aws_h2_connection *connection = handler->impl;

    connection->base.channel_slot = slot;

    /* Acquire a hold on the channel to prevent its destruction until the user has
     * given the go-ahead via aws_http_connection_release() */
    aws_channel_acquire_hold(slot->channel);

    /* Send HTTP/2 connection preface (RFC-7540 3.5)
     * - clients must send magic string
     * - both client and server must send SETTINGS frame */

    if (connection->base.client_data) {
        if (s_send_connection_preface_client_string(connection)) {
            CONNECTION_LOGF(
                ERROR,
                connection,
                "Failed to send client connection preface string, %s",
                aws_error_name(aws_last_error()));
            goto error;
        }
    }
    /* Move the pre-built initial SETTINGS entry into the pending queue; it is
     * consumed later when the peer's SETTINGS ACK arrives. */
    struct aws_h2_pending_settings *init_pending_settings = connection->thread_data.init_pending_settings;
    aws_linked_list_push_back(&connection->thread_data.pending_settings_queue, &init_pending_settings->node);
    connection->thread_data.init_pending_settings = NULL;
    /* Set user_data here, the user_data is valid now */
    init_pending_settings->user_data = connection->base.user_data;

    struct aws_h2_frame *init_settings_frame = aws_h2_frame_new_settings(
        connection->base.alloc,
        init_pending_settings->settings_array,
        init_pending_settings->num_settings,
        false /*ACK*/);
    if (!init_settings_frame) {
        CONNECTION_LOGF(
            ERROR,
            connection,
            "Failed to create the initial settings frame, error %s",
            aws_error_name(aws_last_error()));
        aws_mem_release(connection->base.alloc, init_pending_settings);
        goto error;
    }
    /* enqueue the initial settings frame here */
    aws_linked_list_push_back(&connection->thread_data.outgoing_frames_queue, &init_settings_frame->node);

    /* If not manual connection window management, update the connection window to max. */
    if (!connection->conn_manual_window_management) {
        uint32_t initial_window_update_size = AWS_H2_WINDOW_UPDATE_MAX - AWS_H2_INIT_WINDOW_SIZE;
        struct aws_h2_frame *connection_window_update_frame =
            aws_h2_frame_new_window_update(connection->base.alloc, 0 /* stream_id */, initial_window_update_size);
        /* Only fails on OOM, which this codebase treats as fatal. */
        AWS_ASSERT(connection_window_update_frame);
        /* enqueue the windows update frame here */
        aws_linked_list_push_back(
            &connection->thread_data.outgoing_frames_queue, &connection_window_update_frame->node);
        connection->thread_data.window_size_self += initial_window_update_size;
    }
    aws_h2_try_write_outgoing_frames(connection);
    return;

error:
    aws_h2_connection_shutdown_due_to_write_err(connection, aws_last_error());
}
+
/* Finish a stream on the channel thread: log the outcome, remove the stream
 * from active_streams_map and any list it sits in, update idle/busy stats when
 * this was the last active stream, fire its completion callback, and release
 * the connection's reference on it. `stream` must not be touched afterward. */
static void s_stream_complete(struct aws_h2_connection *connection, struct aws_h2_stream *stream, int error_code) {
    AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));

    /* Nice logging */
    if (error_code) {
        AWS_H2_STREAM_LOGF(
            ERROR, stream, "Stream completed with error %d (%s).", error_code, aws_error_name(error_code));
    } else if (stream->base.client_data) {
        int status = stream->base.client_data->response_status;
        AWS_H2_STREAM_LOGF(
            DEBUG, stream, "Client stream complete, response status %d (%s)", status, aws_http_status_text(status));
    } else {
        AWS_H2_STREAM_LOG(DEBUG, stream, "Server stream complete");
    }

    /* Remove stream from active_streams_map and outgoing_stream_list (if it was in them at all) */
    aws_hash_table_remove(&connection->thread_data.active_streams_map, (void *)(size_t)stream->base.id, NULL, NULL);
    /* node.next is non-NULL only while the stream is linked into some list. */
    if (stream->node.next) {
        aws_linked_list_remove(&stream->node);
    }

    if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) == 0 &&
        connection->thread_data.incoming_timestamp_ns != 0) {
        uint64_t now_ns = 0;
        aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns);
        /* transition from something to read -> nothing to read and nothing to write */
        s_add_time_measurement_to_stats(
            connection->thread_data.incoming_timestamp_ns,
            now_ns,
            &connection->thread_data.stats.pending_incoming_stream_ms);
        connection->thread_data.stats.was_inactive = true;
        connection->thread_data.incoming_timestamp_ns = 0;
    }

    aws_h2_stream_complete(stream, error_code);

    /* release connection's hold on stream */
    aws_http_stream_release(&stream->base);
}
+
+int aws_h2_connection_on_stream_closed(
+ struct aws_h2_connection *connection,
+ struct aws_h2_stream *stream,
+ enum aws_h2_stream_closed_when closed_when,
+ int aws_error_code) {
+
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ AWS_PRECONDITION(stream->thread_data.state == AWS_H2_STREAM_STATE_CLOSED);
+ AWS_PRECONDITION(stream->base.id != 0);
+
+ uint32_t stream_id = stream->base.id;
+
+ /* Mark stream complete. This removes the stream from any "active" datastructures,
+ * invokes its completion callback, and releases its refcount. */
+ s_stream_complete(connection, stream, aws_error_code);
+ stream = NULL; /* Reference released, do not touch again */
+
+ if (s_record_closed_stream(connection, stream_id, closed_when)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_record_closed_stream(
+ struct aws_h2_connection *connection,
+ uint32_t stream_id,
+ enum aws_h2_stream_closed_when closed_when) {
+
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (aws_cache_put(connection->thread_data.closed_streams, (void *)(size_t)stream_id, (void *)(size_t)closed_when)) {
+ CONNECTION_LOG(ERROR, connection, "Failed inserting ID into cache of recently closed streams");
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_h2_connection_send_rst_and_close_reserved_stream(
+ struct aws_h2_connection *connection,
+ uint32_t stream_id,
+ uint32_t h2_error_code) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ struct aws_h2_frame *rst_stream = aws_h2_frame_new_rst_stream(connection->base.alloc, stream_id, h2_error_code);
+ if (!rst_stream) {
+ CONNECTION_LOGF(ERROR, connection, "Error creating RST_STREAM frame, %s", aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+ aws_h2_connection_enqueue_outgoing_frame(connection, rst_stream);
+
+ /* If we ever fully support PUSH_PROMISE, this is where we'd remove the
+ * promised_stream_id from some reserved_streams datastructure */
+
+ return s_record_closed_stream(connection, stream_id, AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_SENT);
+}
+
+/* Move stream into "active" datastructures and notify stream that it can send frames now */
+static void s_move_stream_to_thread(
+ struct aws_h2_connection *connection,
+ struct aws_h2_stream *stream,
+ int new_stream_error_code) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (new_stream_error_code) {
+ aws_raise_error(new_stream_error_code);
+ AWS_H2_STREAM_LOGF(
+ ERROR,
+ stream,
+ "Failed activating stream, error %d (%s)",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ uint32_t max_concurrent_streams = connection->thread_data.settings_peer[AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS];
+ if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) >= max_concurrent_streams) {
+ AWS_H2_STREAM_LOG(ERROR, stream, "Failed activating stream, max concurrent streams are reached");
+ aws_raise_error(AWS_ERROR_HTTP_MAX_CONCURRENT_STREAMS_EXCEEDED);
+ goto error;
+ }
+
+ if (aws_hash_table_put(
+ &connection->thread_data.active_streams_map, (void *)(size_t)stream->base.id, stream, NULL)) {
+ AWS_H2_STREAM_LOG(ERROR, stream, "Failed inserting stream into map");
+ goto error;
+ }
+
+ enum aws_h2_stream_body_state body_state = AWS_H2_STREAM_BODY_STATE_NONE;
+ if (aws_h2_stream_on_activated(stream, &body_state)) {
+ goto error;
+ }
+
+ if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) == 1) {
+ /* transition from nothing to read -> something to read */
+ uint64_t now_ns = 0;
+ aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns);
+ connection->thread_data.incoming_timestamp_ns = now_ns;
+ }
+
+ switch (body_state) {
+ case AWS_H2_STREAM_BODY_STATE_WAITING_WRITES:
+ aws_linked_list_push_back(&connection->thread_data.waiting_streams_list, &stream->node);
+ break;
+ case AWS_H2_STREAM_BODY_STATE_ONGOING:
+ aws_linked_list_push_back(&connection->thread_data.outgoing_streams_list, &stream->node);
+ break;
+ default:
+ break;
+ }
+ return;
+error:
+ /* If the stream got into any datastructures, s_stream_complete() will remove it */
+ s_stream_complete(connection, stream, aws_last_error());
+}
+
+/* Perform on-thread work that is triggered by calls to the connection/stream API */
+static void s_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ struct aws_h2_connection *connection = arg;
+
+ struct aws_linked_list pending_frames;
+ aws_linked_list_init(&pending_frames);
+
+ struct aws_linked_list pending_streams;
+ aws_linked_list_init(&pending_streams);
+
+ struct aws_linked_list pending_settings;
+ aws_linked_list_init(&pending_settings);
+
+ struct aws_linked_list pending_ping;
+ aws_linked_list_init(&pending_ping);
+
+ struct aws_linked_list pending_goaway;
+ aws_linked_list_init(&pending_goaway);
+
+ size_t window_update_size;
+ int new_stream_error_code;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ connection->synced_data.is_cross_thread_work_task_scheduled = false;
+
+ aws_linked_list_swap_contents(&connection->synced_data.pending_frame_list, &pending_frames);
+ aws_linked_list_swap_contents(&connection->synced_data.pending_stream_list, &pending_streams);
+ aws_linked_list_swap_contents(&connection->synced_data.pending_settings_list, &pending_settings);
+ aws_linked_list_swap_contents(&connection->synced_data.pending_ping_list, &pending_ping);
+ aws_linked_list_swap_contents(&connection->synced_data.pending_goaway_list, &pending_goaway);
+ window_update_size = connection->synced_data.window_update_size;
+ connection->synced_data.window_update_size = 0;
+ new_stream_error_code = connection->synced_data.new_stream_error_code;
+
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ /* Enqueue new pending control frames */
+ while (!aws_linked_list_empty(&pending_frames)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&pending_frames);
+ struct aws_h2_frame *frame = AWS_CONTAINER_OF(node, struct aws_h2_frame, node);
+ aws_h2_connection_enqueue_outgoing_frame(connection, frame);
+ }
+
+ /* We already enqueued the window_update frame, just apply the change and let our peer check this value, no matter
+ * overflow happens or not. Peer will detect it for us. */
+ connection->thread_data.window_size_self =
+ aws_add_size_saturating(connection->thread_data.window_size_self, window_update_size);
+
+ /* Process new pending_streams */
+ while (!aws_linked_list_empty(&pending_streams)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&pending_streams);
+ struct aws_h2_stream *stream = AWS_CONTAINER_OF(node, struct aws_h2_stream, node);
+ s_move_stream_to_thread(connection, stream, new_stream_error_code);
+ }
+
+ /* Move pending settings to thread data */
+ while (!aws_linked_list_empty(&pending_settings)) {
+ aws_linked_list_push_back(
+ &connection->thread_data.pending_settings_queue, aws_linked_list_pop_front(&pending_settings));
+ }
+
+ /* Move pending PING to thread data */
+ while (!aws_linked_list_empty(&pending_ping)) {
+ aws_linked_list_push_back(
+ &connection->thread_data.pending_ping_queue, aws_linked_list_pop_front(&pending_ping));
+ }
+
+ /* Send user requested goaways */
+ while (!aws_linked_list_empty(&pending_goaway)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&pending_goaway);
+ struct aws_h2_pending_goaway *goaway = AWS_CONTAINER_OF(node, struct aws_h2_pending_goaway, node);
+ s_send_goaway(connection, goaway->http2_error, goaway->allow_more_streams, &goaway->debug_data);
+ aws_mem_release(connection->base.alloc, goaway);
+ }
+
+ /* It's likely that frames were queued while processing cross-thread work.
+ * If so, try writing them now */
+ aws_h2_try_write_outgoing_frames(connection);
+}
+
/* Activate a stream created by make_request: assign its stream id, queue it
 * for the channel thread, and schedule the cross-thread work task if needed.
 * Safe to call off the channel thread; idempotent if already activated.
 * Returns AWS_OP_SUCCESS, or AWS_OP_ERR with the latched new-stream error (or
 * the id-exhaustion error from aws_http_connection_get_next_stream_id). */
int aws_h2_stream_activate(struct aws_http_stream *stream) {
    struct aws_h2_stream *h2_stream = AWS_CONTAINER_OF(stream, struct aws_h2_stream, base);

    struct aws_http_connection *base_connection = stream->owning_connection;
    struct aws_h2_connection *connection = AWS_CONTAINER_OF(base_connection, struct aws_h2_connection, base);

    int err;
    bool was_cross_thread_work_scheduled = false;
    { /* BEGIN CRITICAL SECTION */
        /* Takes both the stream's and the connection's lock, in a fixed order. */
        s_acquire_stream_and_connection_lock(h2_stream, connection);

        if (stream->id) {
            /* stream has already been activated. */
            s_release_stream_and_connection_lock(h2_stream, connection);
            return AWS_OP_SUCCESS;
        }

        err = connection->synced_data.new_stream_error_code;
        if (err) {
            s_release_stream_and_connection_lock(h2_stream, connection);
            goto error;
        }

        stream->id = aws_http_connection_get_next_stream_id(base_connection);

        if (stream->id) {
            /* success */
            was_cross_thread_work_scheduled = connection->synced_data.is_cross_thread_work_task_scheduled;
            connection->synced_data.is_cross_thread_work_task_scheduled = true;

            aws_linked_list_push_back(&connection->synced_data.pending_stream_list, &h2_stream->node);
            h2_stream->synced_data.api_state = AWS_H2_STREAM_API_STATE_ACTIVE;
        }

        s_release_stream_and_connection_lock(h2_stream, connection);
    } /* END CRITICAL SECTION */

    if (!stream->id) {
        /* aws_http_connection_get_next_stream_id() raises its own error. */
        return AWS_OP_ERR;
    }

    /* connection keeps activated stream alive until stream completes */
    aws_atomic_fetch_add(&stream->refcount, 1);

    /* Only the caller that flipped the flag schedules the task, so it is
     * scheduled at most once per drain cycle. */
    if (!was_cross_thread_work_scheduled) {
        CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task");
        aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task);
    }

    return AWS_OP_SUCCESS;

error:
    CONNECTION_LOGF(
        ERROR,
        connection,
        "Failed to activate the stream id=%p, new streams are not allowed now. error %d (%s)",
        (void *)stream,
        err,
        aws_error_name(err));
    return aws_raise_error(err);
}
+
/* Connection vtable: create (but do not activate) a request stream.
 * Returns the new stream, or NULL (error raised) if creation fails or the
 * connection no longer accepts new streams. The caller owns the returned
 * stream's reference; it is not yet scheduled on the channel thread. */
static struct aws_http_stream *s_connection_make_request(
    struct aws_http_connection *client_connection,
    const struct aws_http_make_request_options *options) {

    struct aws_h2_connection *connection = AWS_CONTAINER_OF(client_connection, struct aws_h2_connection, base);

    /* #TODO: http/2-ify the request (ex: add ":method" header). Should we mutate a copy or the original? Validate?
     * Or just pass pointer to headers struct and let encoder transform it while encoding? */

    struct aws_h2_stream *stream = aws_h2_stream_new_request(client_connection, options);
    if (!stream) {
        CONNECTION_LOGF(
            ERROR,
            connection,
            "Failed to create stream, error %d (%s)",
            aws_last_error(),
            aws_error_name(aws_last_error()));
        return NULL;
    }

    /* Best-effort early rejection; the same check is repeated at activation,
     * so a race here only delays the failure, it cannot be missed. */
    int new_stream_error_code;
    { /* BEGIN CRITICAL SECTION */
        s_lock_synced_data(connection);
        new_stream_error_code = connection->synced_data.new_stream_error_code;
        s_unlock_synced_data(connection);
    } /* END CRITICAL SECTION */
    if (new_stream_error_code) {
        aws_raise_error(new_stream_error_code);
        CONNECTION_LOGF(
            ERROR,
            connection,
            "Cannot create request stream, error %d (%s)",
            aws_last_error(),
            aws_error_name(aws_last_error()));
        goto error;
    }

    AWS_H2_STREAM_LOG(DEBUG, stream, "Created HTTP/2 request stream"); /* #TODO: print method & path */
    return &stream->base;

error:
    /* Force destruction of the stream, avoiding ref counting */
    stream->base.vtable->destroy(&stream->base);
    return NULL;
}
+
+static void s_connection_close(struct aws_http_connection *connection_base) {
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+
+ /* Don't stop reading/writing immediately, let that happen naturally during the channel shutdown process. */
+ s_stop(connection, false /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, AWS_ERROR_SUCCESS);
+}
+
+static void s_connection_stop_new_request(struct aws_http_connection *connection_base) {
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ if (!connection->synced_data.new_stream_error_code) {
+ connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+ }
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+}
+
+static bool s_connection_is_open(const struct aws_http_connection *connection_base) {
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ bool is_open;
+
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ is_open = connection->synced_data.is_open;
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ return is_open;
+}
+
+static bool s_connection_new_requests_allowed(const struct aws_http_connection *connection_base) {
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ int new_stream_error_code;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ new_stream_error_code = connection->synced_data.new_stream_error_code;
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ return new_stream_error_code == 0;
+}
+
+static void s_connection_update_window(struct aws_http_connection *connection_base, uint32_t increment_size) {
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ if (!increment_size) {
+ /* Silently do nothing. */
+ return;
+ }
+ if (!connection->conn_manual_window_management) {
+ /* auto-mode, manual update window is not supported, silently do nothing with warning log. */
+ CONNECTION_LOG(
+ DEBUG,
+ connection,
+ "Connection manual window management is off, update window operations are not supported.");
+ return;
+ }
+ struct aws_h2_frame *connection_window_update_frame =
+ aws_h2_frame_new_window_update(connection->base.alloc, 0, increment_size);
+ if (!connection_window_update_frame) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Failed to create WINDOW_UPDATE frame on connection, error %s",
+ aws_error_name(aws_last_error()));
+ /* OOM should result in a crash. And the increment size is too huge is the only other failure case, which will
+ * result in overflow. */
+ goto overflow;
+ }
+
+ int err = 0;
+ bool cross_thread_work_should_schedule = false;
+ bool connection_open = false;
+ size_t sum_size = 0;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+
+ err |= aws_add_size_checked(connection->synced_data.window_update_size, increment_size, &sum_size);
+ err |= sum_size > AWS_H2_WINDOW_UPDATE_MAX;
+ connection_open = connection->synced_data.is_open;
+
+ if (!err && connection_open) {
+ cross_thread_work_should_schedule = !connection->synced_data.is_cross_thread_work_task_scheduled;
+ connection->synced_data.is_cross_thread_work_task_scheduled = true;
+ aws_linked_list_push_back(
+ &connection->synced_data.pending_frame_list, &connection_window_update_frame->node);
+ connection->synced_data.window_update_size = sum_size;
+ }
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ if (err) {
+ CONNECTION_LOG(
+ ERROR,
+ connection,
+ "The connection's flow-control windows has been incremented beyond 2**31 -1, the max for HTTP/2. The ");
+ aws_h2_frame_destroy(connection_window_update_frame);
+ goto overflow;
+ }
+
+ if (cross_thread_work_should_schedule) {
+ CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task");
+ aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task);
+ }
+
+ if (!connection_open) {
+ /* connection already closed, just do nothing */
+ aws_h2_frame_destroy(connection_window_update_frame);
+ return;
+ }
+ CONNECTION_LOGF(
+ TRACE,
+ connection,
+ "User requested to update the HTTP/2 connection's flow-control windows by %" PRIu32 ".",
+ increment_size);
+ return;
+overflow:
+ /* Shutdown the connection as overflow detected */
+ s_stop(
+ connection,
+ false /*stop_reading*/,
+ false /*stop_writing*/,
+ true /*schedule_shutdown*/,
+ AWS_ERROR_OVERFLOW_DETECTED);
+}
+
/* Connection vtable: send a SETTINGS frame with the user's values.
 * Builds both the frame and a pending-settings record (consumed when the peer's
 * ACK arrives — see s_decoder_on_settings_ack), then hands both to the channel
 * thread via synced_data. on_completed fires after the ACK is received.
 * Returns AWS_OP_SUCCESS, or AWS_OP_ERR if args are invalid, allocation fails,
 * or the connection is closed/closing. */
static int s_connection_change_settings(
    struct aws_http_connection *connection_base,
    const struct aws_http2_setting *settings_array,
    size_t num_settings,
    aws_http2_on_change_settings_complete_fn *on_completed,
    void *user_data) {

    struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);

    if (!settings_array && num_settings) {
        CONNECTION_LOG(ERROR, connection, "Settings_array is NULL and num_settings is not zero.");
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    struct aws_h2_pending_settings *pending_settings =
        s_new_pending_settings(connection->base.alloc, settings_array, num_settings, on_completed, user_data);
    if (!pending_settings) {
        return AWS_OP_ERR;
    }
    struct aws_h2_frame *settings_frame =
        aws_h2_frame_new_settings(connection->base.alloc, settings_array, num_settings, false /*ACK*/);
    if (!settings_frame) {
        CONNECTION_LOGF(
            ERROR, connection, "Failed to create settings frame, error %s", aws_error_name(aws_last_error()));
        aws_mem_release(connection->base.alloc, pending_settings);
        return AWS_OP_ERR;
    }

    bool was_cross_thread_work_scheduled = false;
    bool connection_open;
    { /* BEGIN CRITICAL SECTION */
        s_lock_synced_data(connection);

        connection_open = connection->synced_data.is_open;
        if (!connection_open) {
            s_unlock_synced_data(connection);
            goto closed;
        }
        /* Only the caller that flips the flag needs to schedule the task. */
        was_cross_thread_work_scheduled = connection->synced_data.is_cross_thread_work_task_scheduled;
        connection->synced_data.is_cross_thread_work_task_scheduled = true;
        /* Ownership of frame and pending record passes to the channel thread. */
        aws_linked_list_push_back(&connection->synced_data.pending_frame_list, &settings_frame->node);
        aws_linked_list_push_back(&connection->synced_data.pending_settings_list, &pending_settings->node);

        s_unlock_synced_data(connection);
    } /* END CRITICAL SECTION */

    if (!was_cross_thread_work_scheduled) {
        CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task");
        aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task);
    }

    return AWS_OP_SUCCESS;
closed:
    CONNECTION_LOG(ERROR, connection, "Failed to change settings, connection is closed or closing.");
    aws_h2_frame_destroy(settings_frame);
    aws_mem_release(connection->base.alloc, pending_settings);
    return aws_raise_error(AWS_ERROR_INVALID_STATE);
}
+
/* Connection vtable: send a PING frame.
 * Validates the optional opaque data (must be exactly 8 bytes if supplied),
 * timestamps the pending-ping record for RTT measurement, and hands both the
 * frame and the record to the channel thread via synced_data. on_completed
 * fires when the PING ACK arrives. Returns AWS_OP_SUCCESS, or AWS_OP_ERR on
 * bad args, clock failure, allocation failure, or closed connection. */
static int s_connection_send_ping(
    struct aws_http_connection *connection_base,
    const struct aws_byte_cursor *optional_opaque_data,
    aws_http2_on_ping_complete_fn *on_completed,
    void *user_data) {

    struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
    if (optional_opaque_data && optional_opaque_data->len != 8) {
        CONNECTION_LOG(ERROR, connection, "Only 8 bytes opaque data supported for PING in HTTP/2");
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }
    /* Timestamp taken now so the eventual ACK yields a round-trip time. */
    uint64_t time_stamp;
    if (aws_high_res_clock_get_ticks(&time_stamp)) {
        CONNECTION_LOGF(
            ERROR,
            connection,
            "Failed getting the time stamp to start PING, error %s",
            aws_error_name(aws_last_error()));
        return AWS_OP_ERR;
    }
    struct aws_h2_pending_ping *pending_ping =
        s_new_pending_ping(connection->base.alloc, optional_opaque_data, time_stamp, user_data, on_completed);
    if (!pending_ping) {
        return AWS_OP_ERR;
    }
    struct aws_h2_frame *ping_frame =
        aws_h2_frame_new_ping(connection->base.alloc, false /*ACK*/, pending_ping->opaque_data);
    if (!ping_frame) {
        CONNECTION_LOGF(ERROR, connection, "Failed to create PING frame, error %s", aws_error_name(aws_last_error()));
        aws_mem_release(connection->base.alloc, pending_ping);
        return AWS_OP_ERR;
    }

    bool was_cross_thread_work_scheduled = false;
    bool connection_open;
    { /* BEGIN CRITICAL SECTION */
        s_lock_synced_data(connection);

        connection_open = connection->synced_data.is_open;
        if (!connection_open) {
            s_unlock_synced_data(connection);
            goto closed;
        }
        /* Only the caller that flips the flag needs to schedule the task. */
        was_cross_thread_work_scheduled = connection->synced_data.is_cross_thread_work_task_scheduled;
        connection->synced_data.is_cross_thread_work_task_scheduled = true;
        /* Ownership of frame and pending record passes to the channel thread. */
        aws_linked_list_push_back(&connection->synced_data.pending_frame_list, &ping_frame->node);
        aws_linked_list_push_back(&connection->synced_data.pending_ping_list, &pending_ping->node);

        s_unlock_synced_data(connection);
    } /* END CRITICAL SECTION */

    if (!was_cross_thread_work_scheduled) {
        CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task");
        aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task);
    }

    return AWS_OP_SUCCESS;

closed:
    CONNECTION_LOG(ERROR, connection, "Failed to send ping, connection is closed or closing.");
    aws_h2_frame_destroy(ping_frame);
    aws_mem_release(connection->base.alloc, pending_ping);
    return aws_raise_error(AWS_ERROR_INVALID_STATE);
}
+
/* vtable impl: request a GOAWAY be sent, callable from any thread.
 * The request is queued under the lock and actually encoded/written on the channel thread. */
static void s_connection_send_goaway(
    struct aws_http_connection *connection_base,
    uint32_t http2_error,
    bool allow_more_streams,
    const struct aws_byte_cursor *optional_debug_data) {

    struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
    /* NOTE(review): result is not NULL-checked here (unlike s_new_pending_ping in
     * s_connection_send_ping) -- presumably s_new_pending_goaway cannot fail; confirm. */
    struct aws_h2_pending_goaway *pending_goaway =
        s_new_pending_goaway(connection->base.alloc, http2_error, allow_more_streams, optional_debug_data);

    bool was_cross_thread_work_scheduled = false;
    bool connection_open;
    { /* BEGIN CRITICAL SECTION */
        s_lock_synced_data(connection);

        connection_open = connection->synced_data.is_open;
        if (!connection_open) {
            s_unlock_synced_data(connection);
            CONNECTION_LOG(DEBUG, connection, "Goaway not sent, connection is closed or closing.");
            aws_mem_release(connection->base.alloc, pending_goaway);
            return;
        }
        was_cross_thread_work_scheduled = connection->synced_data.is_cross_thread_work_task_scheduled;
        connection->synced_data.is_cross_thread_work_task_scheduled = true;
        aws_linked_list_push_back(&connection->synced_data.pending_goaway_list, &pending_goaway->node);
        s_unlock_synced_data(connection);
    } /* END CRITICAL SECTION */

    /* Unusual combination: the caller asked to keep accepting streams despite a real error code */
    if (allow_more_streams && (http2_error != AWS_HTTP2_ERR_NO_ERROR)) {
        CONNECTION_LOGF(
            DEBUG,
            connection,
            "Send goaway with allow more streams on and non-zero error code %s(0x%x)",
            aws_http2_error_code_to_str(http2_error),
            http2_error);
    }

    if (!was_cross_thread_work_scheduled) {
        CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task");
        aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task);
    }
}
+
+static void s_get_settings_general(
+ const struct aws_http_connection *connection_base,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT],
+ bool local) {
+
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ uint32_t synced_settings[AWS_HTTP2_SETTINGS_END_RANGE];
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ if (local) {
+ memcpy(
+ synced_settings, connection->synced_data.settings_self, sizeof(connection->synced_data.settings_self));
+ } else {
+ memcpy(
+ synced_settings, connection->synced_data.settings_peer, sizeof(connection->synced_data.settings_peer));
+ }
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ for (int i = AWS_HTTP2_SETTINGS_BEGIN_RANGE; i < AWS_HTTP2_SETTINGS_END_RANGE; i++) {
+ /* settings range begin with 1, store them into 0-based array of aws_http2_setting */
+ out_settings[i - 1].id = i;
+ out_settings[i - 1].value = synced_settings[i];
+ }
+ return;
+}
+
/* vtable impl: snapshot this side's own (local) settings into out_settings */
static void s_connection_get_local_settings(
    const struct aws_http_connection *connection_base,
    struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]) {
    s_get_settings_general(connection_base, out_settings, true /*local*/);
}
+
/* vtable impl: snapshot the peer's (remote) settings into out_settings */
static void s_connection_get_remote_settings(
    const struct aws_http_connection *connection_base,
    struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]) {
    s_get_settings_general(connection_base, out_settings, false /*local*/);
}
+
/* Send a GOAWAY with the lowest possible last-stream-id or graceful shutdown warning.
 * Channel-thread only. A "graceful shutdown warning" uses last-stream-id AWS_H2_STREAM_ID_MAX
 * (allow_more_streams); a real GOAWAY never raises last-stream-id above one already announced. */
static void s_send_goaway(
    struct aws_h2_connection *connection,
    uint32_t h2_error_code,
    bool allow_more_streams,
    const struct aws_byte_cursor *optional_debug_data) {
    AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));

    uint32_t last_stream_id = allow_more_streams ? AWS_H2_STREAM_ID_MAX
                                                 : aws_min_u32(
                                                       connection->thread_data.latest_peer_initiated_stream_id,
                                                       connection->thread_data.goaway_sent_last_stream_id);

    /* Only possible when allow_more_streams (the min above otherwise caps last_stream_id):
     * a stricter GOAWAY is already out, so the warning would be a step backwards -- skip it */
    if (last_stream_id > connection->thread_data.goaway_sent_last_stream_id) {
        CONNECTION_LOG(
            DEBUG,
            connection,
            "GOAWAY frame with lower last stream id has been sent, ignoring sending graceful shutdown warning.");
        return;
    }

    struct aws_byte_cursor debug_data;
    AWS_ZERO_STRUCT(debug_data);
    if (optional_debug_data) {
        debug_data = *optional_debug_data;
    }

    struct aws_h2_frame *goaway =
        aws_h2_frame_new_goaway(connection->base.alloc, last_stream_id, h2_error_code, debug_data);
    if (!goaway) {
        CONNECTION_LOGF(ERROR, connection, "Error creating GOAWAY frame, %s", aws_error_name(aws_last_error()));
        goto error;
    }

    connection->thread_data.goaway_sent_last_stream_id = last_stream_id;
    /* Mirror into synced_data so s_connection_get_sent_goaway() can report it from any thread */
    { /* BEGIN CRITICAL SECTION */
        s_lock_synced_data(connection);
        connection->synced_data.goaway_sent_last_stream_id = last_stream_id;
        connection->synced_data.goaway_sent_http2_error_code = h2_error_code;
        s_unlock_synced_data(connection);
    } /* END CRITICAL SECTION */
    aws_h2_connection_enqueue_outgoing_frame(connection, goaway);
    return;

error:
    aws_h2_connection_shutdown_due_to_write_err(connection, aws_last_error());
}
+
+static int s_connection_get_sent_goaway(
+ struct aws_http_connection *connection_base,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id) {
+
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ uint32_t sent_last_stream_id;
+ uint32_t sent_http2_error;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ sent_last_stream_id = connection->synced_data.goaway_sent_last_stream_id;
+ sent_http2_error = connection->synced_data.goaway_sent_http2_error_code;
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ uint32_t max_stream_id = AWS_H2_STREAM_ID_MAX;
+ if (sent_last_stream_id == max_stream_id + 1) {
+ CONNECTION_LOG(ERROR, connection, "No GOAWAY has been sent so far.");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ *out_http2_error = sent_http2_error;
+ *out_last_stream_id = sent_last_stream_id;
+ return AWS_OP_SUCCESS;
+}
+
+static int s_connection_get_received_goaway(
+ struct aws_http_connection *connection_base,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id) {
+
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ uint32_t received_last_stream_id = 0;
+ uint32_t received_http2_error = 0;
+ bool goaway_not_ready = false;
+ uint32_t max_stream_id = AWS_H2_STREAM_ID_MAX;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ if (connection->synced_data.goaway_received_last_stream_id == max_stream_id + 1) {
+ goaway_not_ready = true;
+ } else {
+ received_last_stream_id = connection->synced_data.goaway_received_last_stream_id;
+ received_http2_error = connection->synced_data.goaway_received_http2_error_code;
+ }
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (goaway_not_ready) {
+ CONNECTION_LOG(ERROR, connection, "No GOAWAY has been received so far.");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ *out_http2_error = received_http2_error;
+ *out_last_stream_id = received_last_stream_id;
+ return AWS_OP_SUCCESS;
+}
+
/* vtable impl: decode one incoming aws_io_message.
 * Always returns AWS_OP_SUCCESS: decode failures are Connection Errors handled by sending
 * GOAWAY and shutting down, not by failing this call. The message is always released. */
static int s_handler_process_read_message(
    struct aws_channel_handler *handler,
    struct aws_channel_slot *slot,
    struct aws_io_message *message) {
    (void)slot;
    struct aws_h2_connection *connection = handler->impl;

    CONNECTION_LOGF(TRACE, connection, "Begin processing message of size %zu.", message->message_data.len);

    /* Data arriving after reads stopped is dropped (still released at clean_up) */
    if (connection->thread_data.is_reading_stopped) {
        CONNECTION_LOG(ERROR, connection, "Cannot process message because connection is shutting down.");
        goto clean_up;
    }

    /* Any error that bubbles up from the decoder or its callbacks is treated as
     * a Connection Error (a GOAWAY frames is sent, and the connection is closed) */
    struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data);
    struct aws_h2err err = aws_h2_decode(connection->thread_data.decoder, &message_cursor);
    if (aws_h2err_failed(err)) {
        CONNECTION_LOGF(
            ERROR,
            connection,
            "Failure while receiving frames, %s. Sending GOAWAY %s(0x%x) and closing connection",
            aws_error_name(err.aws_code),
            aws_http2_error_code_to_str(err.h2_code),
            err.h2_code);
        goto shutdown;
    }

    /* HTTP/2 protocol uses WINDOW_UPDATE frames to coordinate data rates with peer,
     * so we can just keep the aws_channel's read-window wide open */
    if (aws_channel_slot_increment_read_window(slot, message->message_data.len)) {
        CONNECTION_LOGF(
            ERROR,
            connection,
            "Incrementing read window failed, error %d (%s). Closing connection",
            aws_last_error(),
            aws_error_name(aws_last_error()));
        err = aws_h2err_from_last_error();
        goto shutdown;
    }

    goto clean_up;

shutdown:
    /* `err` is set by whichever failure path jumped here; stop reading but keep writing
     * so the GOAWAY can go out */
    s_send_goaway(connection, err.h2_code, false /*allow_more_streams*/, NULL /*optional_debug_data*/);
    aws_h2_try_write_outgoing_frames(connection);
    s_stop(connection, true /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, err.aws_code);

clean_up:
    aws_mem_release(message->allocator, message);

    /* Flush any outgoing frames that might have been queued as a result of decoder callbacks. */
    aws_h2_try_write_outgoing_frames(connection);

    return AWS_OP_SUCCESS;
}
+
+static int s_handler_process_write_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message) {
+
+ (void)handler;
+ (void)slot;
+ (void)message;
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+}
+
+static int s_handler_increment_read_window(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ size_t size) {
+
+ (void)handler;
+ (void)slot;
+ (void)size;
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+}
+
/* vtable impl: channel shutdown notification for one direction.
 * Read direction: stop reading, flush user-requested GOAWAYs, guarantee a final GOAWAY goes
 * out, then complete immediately. Write direction: defer completion to s_finish_shutdown()
 * until the GOAWAY is written, unless we must free resources immediately or writing is done. */
static int s_handler_shutdown(
    struct aws_channel_handler *handler,
    struct aws_channel_slot *slot,
    enum aws_channel_direction dir,
    int error_code,
    bool free_scarce_resources_immediately) {

    struct aws_h2_connection *connection = handler->impl;
    CONNECTION_LOGF(
        TRACE,
        connection,
        "Channel shutting down in %s direction with error code %d (%s).",
        (dir == AWS_CHANNEL_DIR_READ) ? "read" : "write",
        error_code,
        aws_error_name(error_code));

    if (dir == AWS_CHANNEL_DIR_READ) {
        /* This call ensures that no further streams will be created. */
        s_stop(connection, true /*stop_reading*/, false /*stop_writing*/, false /*schedule_shutdown*/, error_code);
        /* Send user requested GOAWAY, if they haven't been sent before. It's OK to access
         * synced_data.pending_goaway_list without holding the lock because no more user_requested GOAWAY can be added
         * after s_stop() has been invoked. */
        if (!aws_linked_list_empty(&connection->synced_data.pending_goaway_list)) {
            while (!aws_linked_list_empty(&connection->synced_data.pending_goaway_list)) {
                struct aws_linked_list_node *node =
                    aws_linked_list_pop_front(&connection->synced_data.pending_goaway_list);
                struct aws_h2_pending_goaway *goaway = AWS_CONTAINER_OF(node, struct aws_h2_pending_goaway, node);
                s_send_goaway(connection, goaway->http2_error, goaway->allow_more_streams, &goaway->debug_data);
                aws_mem_release(connection->base.alloc, goaway);
            }
            aws_h2_try_write_outgoing_frames(connection);
        }

        /* Send GOAWAY if none have been sent so far,
         * or if we've only sent a "graceful shutdown warning" that didn't name a last-stream-id */
        if (connection->thread_data.goaway_sent_last_stream_id == AWS_H2_STREAM_ID_MAX) {
            s_send_goaway(
                connection,
                error_code ? AWS_HTTP2_ERR_INTERNAL_ERROR : AWS_HTTP2_ERR_NO_ERROR,
                false /*allow_more_streams*/,
                NULL /*optional_debug_data*/);
            aws_h2_try_write_outgoing_frames(connection);
        }
        aws_channel_slot_on_handler_shutdown_complete(
            slot, AWS_CHANNEL_DIR_READ, error_code, free_scarce_resources_immediately);

    } else /* AWS_CHANNEL_DIR_WRITE */ {
        /* Stash shutdown parameters for s_finish_shutdown(), which may run later */
        connection->thread_data.channel_shutdown_error_code = error_code;
        connection->thread_data.channel_shutdown_immediately = free_scarce_resources_immediately;
        connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written = true;

        /* We'd prefer to wait until we know GOAWAY has been written, but don't wait if... */
        if (free_scarce_resources_immediately /* we must finish ASAP */ ||
            connection->thread_data.is_writing_stopped /* write will never complete */ ||
            !connection->thread_data.is_outgoing_frames_task_active /* write is already complete */) {

            s_finish_shutdown(connection);
        } else {
            CONNECTION_LOG(TRACE, connection, "HTTP/2 handler will finish shutdown once GOAWAY frame is written");
        }
    }

    return AWS_OP_SUCCESS;
}
+
/* Final stage of write-direction shutdown (see s_handler_shutdown).
 * Stops writing, completes every remaining stream and pending control-frame callback
 * with AWS_ERROR_HTTP_CONNECTION_CLOSED, then reports shutdown-complete to the channel. */
static void s_finish_shutdown(struct aws_h2_connection *connection) {
    AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
    AWS_PRECONDITION(connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written);

    CONNECTION_LOG(TRACE, connection, "Finishing HTTP/2 handler shutdown");

    connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written = false;

    s_stop(
        connection,
        false /*stop_reading*/,
        true /*stop_writing*/,
        false /*schedule_shutdown*/,
        connection->thread_data.channel_shutdown_error_code);

    /* Remove remaining streams from internal datastructures and mark them as complete. */

    struct aws_hash_iter stream_iter = aws_hash_iter_begin(&connection->thread_data.active_streams_map);
    while (!aws_hash_iter_done(&stream_iter)) {
        struct aws_h2_stream *stream = stream_iter.element.value;
        /* delete the current entry, then advance, before completing the stream */
        aws_hash_iter_delete(&stream_iter, true);
        aws_hash_iter_next(&stream_iter);

        s_stream_complete(connection, stream, AWS_ERROR_HTTP_CONNECTION_CLOSED);
    }

    /* It's OK to access synced_data without holding the lock because
     * no more streams or user-requested control frames can be added after s_stop() has been invoked. */
    while (!aws_linked_list_empty(&connection->synced_data.pending_stream_list)) {
        struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_stream_list);
        struct aws_h2_stream *stream = AWS_CONTAINER_OF(node, struct aws_h2_stream, node);
        s_stream_complete(connection, stream, AWS_ERROR_HTTP_CONNECTION_CLOSED);
    }

    while (!aws_linked_list_empty(&connection->synced_data.pending_frame_list)) {
        struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_frame_list);
        struct aws_h2_frame *frame = AWS_CONTAINER_OF(node, struct aws_h2_frame, node);
        aws_h2_frame_destroy(frame);
    }

    /* invoke pending callbacks haven't moved into thread, and clean up the data */
    while (!aws_linked_list_empty(&connection->synced_data.pending_settings_list)) {
        struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_settings_list);
        struct aws_h2_pending_settings *settings = AWS_CONTAINER_OF(node, struct aws_h2_pending_settings, node);
        if (settings->on_completed) {
            settings->on_completed(&connection->base, AWS_ERROR_HTTP_CONNECTION_CLOSED, settings->user_data);
        }
        aws_mem_release(connection->base.alloc, settings);
    }
    while (!aws_linked_list_empty(&connection->synced_data.pending_ping_list)) {
        struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_ping_list);
        struct aws_h2_pending_ping *ping = AWS_CONTAINER_OF(node, struct aws_h2_pending_ping, node);
        if (ping->on_completed) {
            ping->on_completed(&connection->base, 0 /*fake rtt*/, AWS_ERROR_HTTP_CONNECTION_CLOSED, ping->user_data);
        }
        aws_mem_release(connection->base.alloc, ping);
    }

    /* invoke pending callbacks moved into thread, and clean up the data */
    while (!aws_linked_list_empty(&connection->thread_data.pending_settings_queue)) {
        struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.pending_settings_queue);
        struct aws_h2_pending_settings *pending_settings = AWS_CONTAINER_OF(node, struct aws_h2_pending_settings, node);
        /* fire the user callback with error */
        if (pending_settings->on_completed) {
            pending_settings->on_completed(
                &connection->base, AWS_ERROR_HTTP_CONNECTION_CLOSED, pending_settings->user_data);
        }
        aws_mem_release(connection->base.alloc, pending_settings);
    }
    while (!aws_linked_list_empty(&connection->thread_data.pending_ping_queue)) {
        struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.pending_ping_queue);
        struct aws_h2_pending_ping *pending_ping = AWS_CONTAINER_OF(node, struct aws_h2_pending_ping, node);
        /* fire the user callback with error */
        if (pending_ping->on_completed) {
            pending_ping->on_completed(
                &connection->base, 0 /*fake rtt*/, AWS_ERROR_HTTP_CONNECTION_CLOSED, pending_ping->user_data);
        }
        aws_mem_release(connection->base.alloc, pending_ping);
    }
    aws_channel_slot_on_handler_shutdown_complete(
        connection->base.channel_slot,
        AWS_CHANNEL_DIR_WRITE,
        connection->thread_data.channel_shutdown_error_code,
        connection->thread_data.channel_shutdown_immediately);
}
+
/* vtable impl: initial channel read-window size.
 * Flow control is done with HTTP/2 WINDOW_UPDATE frames rather than the channel's
 * read-window, so the channel window is simply left wide open. */
static size_t s_handler_initial_window_size(struct aws_channel_handler *handler) {
    (void)handler;
    return SIZE_MAX;
}
+
/* vtable impl: per-message framing overhead.
 * "All frames begin with a fixed 9-octet header followed by a variable-length payload"
 * (RFC-7540 4.1). */
static size_t s_handler_message_overhead(struct aws_channel_handler *handler) {
    (void)handler;
    enum { s_h2_frame_prefix_size = 9 };
    return s_h2_frame_prefix_size;
}
+
+static void s_reset_statistics(struct aws_channel_handler *handler) {
+ struct aws_h2_connection *connection = handler->impl;
+ aws_crt_statistics_http2_channel_reset(&connection->thread_data.stats);
+ if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) == 0) {
+ /* Check the current state */
+ connection->thread_data.stats.was_inactive = true;
+ }
+ return;
+}
+
/* vtable impl: fold the elapsed time since the last timestamps into the pending
 * outgoing/incoming stream counters and append this connection's stats to `stats`. */
static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats) {

    struct aws_h2_connection *connection = handler->impl;
    AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));

    /* TODO: Need update the way we calculate statistics, to account for user-controlled pauses.
     * If user is adding chunks 1 by 1, there can naturally be a gap in the upload.
     * If the user lets the stream-window go to zero, there can naturally be a gap in the download. */
    uint64_t now_ns = 0;
    if (aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns)) {
        /* clock failure: report nothing rather than garbage */
        return;
    }

    /* streams still waiting to write => accumulate pending-outgoing time, restart the clock */
    if (!aws_linked_list_empty(&connection->thread_data.outgoing_streams_list)) {
        s_add_time_measurement_to_stats(
            connection->thread_data.outgoing_timestamp_ns,
            now_ns,
            &connection->thread_data.stats.pending_outgoing_stream_ms);

        connection->thread_data.outgoing_timestamp_ns = now_ns;
    }
    /* active streams => accumulate pending-incoming time; otherwise the interval was inactive */
    if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) != 0) {
        s_add_time_measurement_to_stats(
            connection->thread_data.incoming_timestamp_ns,
            now_ns,
            &connection->thread_data.stats.pending_incoming_stream_ms);

        connection->thread_data.incoming_timestamp_ns = now_ns;
    } else {
        connection->thread_data.stats.was_inactive = true;
    }

    void *stats_base = &connection->thread_data.stats;
    aws_array_list_push_back(stats, &stats_base);
}
diff --git a/contrib/restricted/aws/aws-c-http/source/h2_decoder.c b/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
new file mode 100644
index 00000000000..5c8b7ab7b29
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
@@ -0,0 +1,1592 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/h2_decoder.h>
+
+#include <aws/http/private/hpack.h>
+#include <aws/http/private/strutil.h>
+
+#include <aws/common/string.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* Declared initializers */
+#endif
+
+/***********************************************************************************************************************
+ * Constants
+ **********************************************************************************************************************/
+
+/* The scratch buffers data for states with bytes_required > 0. Must be big enough for largest state */
+static const size_t s_scratch_space_size = 9;
+
+/* Stream ids & dependencies should only write the bottom 31 bits */
+static const uint32_t s_31_bit_mask = UINT32_MAX >> 1;
+
+/* initial size for cookie buffer, buffer will grow if needed */
+static const size_t s_decoder_cookie_buffer_initial_size = 512;
+
+#define DECODER_LOGF(level, decoder, text, ...) \
+ AWS_LOGF_##level(AWS_LS_HTTP_DECODER, "id=%p " text, (decoder)->logging_id, __VA_ARGS__)
+#define DECODER_LOG(level, decoder, text) DECODER_LOGF(level, decoder, "%s", text)
+
+#define DECODER_CALL_VTABLE(decoder, fn) \
+ do { \
+ if ((decoder)->vtable->fn) { \
+ DECODER_LOG(TRACE, decoder, "Invoking callback " #fn); \
+ struct aws_h2err vtable_err = (decoder)->vtable->fn((decoder)->userdata); \
+ if (aws_h2err_failed(vtable_err)) { \
+ DECODER_LOGF( \
+ ERROR, \
+ decoder, \
+ "Error from callback " #fn ", %s->%s", \
+ aws_http2_error_code_to_str(vtable_err.h2_code), \
+ aws_error_name(vtable_err.aws_code)); \
+ return vtable_err; \
+ } \
+ } \
+ } while (false)
+#define DECODER_CALL_VTABLE_ARGS(decoder, fn, ...) \
+ do { \
+ if ((decoder)->vtable->fn) { \
+ DECODER_LOG(TRACE, decoder, "Invoking callback " #fn); \
+ struct aws_h2err vtable_err = (decoder)->vtable->fn(__VA_ARGS__, (decoder)->userdata); \
+ if (aws_h2err_failed(vtable_err)) { \
+ DECODER_LOGF( \
+ ERROR, \
+ decoder, \
+ "Error from callback " #fn ", %s->%s", \
+ aws_http2_error_code_to_str(vtable_err.h2_code), \
+ aws_error_name(vtable_err.aws_code)); \
+ return vtable_err; \
+ } \
+ } \
+ } while (false)
+#define DECODER_CALL_VTABLE_STREAM(decoder, fn) \
+ DECODER_CALL_VTABLE_ARGS(decoder, fn, (decoder)->frame_in_progress.stream_id)
+#define DECODER_CALL_VTABLE_STREAM_ARGS(decoder, fn, ...) \
+ DECODER_CALL_VTABLE_ARGS(decoder, fn, (decoder)->frame_in_progress.stream_id, __VA_ARGS__)
+
+/* for storing things in array without worrying about the specific values of the other AWS_HTTP_HEADER_XYZ enums */
+enum pseudoheader_name {
+ PSEUDOHEADER_UNKNOWN = -1, /* Unrecognized value */
+
+ /* Request pseudo-headers */
+ PSEUDOHEADER_METHOD,
+ PSEUDOHEADER_SCHEME,
+ PSEUDOHEADER_AUTHORITY,
+ PSEUDOHEADER_PATH,
+ /* Response pseudo-headers */
+ PSEUDOHEADER_STATUS,
+
+ PSEUDOHEADER_COUNT, /* Number of valid enums */
+};
+
+static const struct aws_byte_cursor *s_pseudoheader_name_to_cursor[PSEUDOHEADER_COUNT] = {
+ [PSEUDOHEADER_METHOD] = &aws_http_header_method,
+ [PSEUDOHEADER_SCHEME] = &aws_http_header_scheme,
+ [PSEUDOHEADER_AUTHORITY] = &aws_http_header_authority,
+ [PSEUDOHEADER_PATH] = &aws_http_header_path,
+ [PSEUDOHEADER_STATUS] = &aws_http_header_status,
+};
+
+static const enum aws_http_header_name s_pseudoheader_to_header_name[PSEUDOHEADER_COUNT] = {
+ [PSEUDOHEADER_METHOD] = AWS_HTTP_HEADER_METHOD,
+ [PSEUDOHEADER_SCHEME] = AWS_HTTP_HEADER_SCHEME,
+ [PSEUDOHEADER_AUTHORITY] = AWS_HTTP_HEADER_AUTHORITY,
+ [PSEUDOHEADER_PATH] = AWS_HTTP_HEADER_PATH,
+ [PSEUDOHEADER_STATUS] = AWS_HTTP_HEADER_STATUS,
+};
+
+static enum pseudoheader_name s_header_to_pseudoheader_name(enum aws_http_header_name name) {
+ /* The compiled switch statement is actually faster than array lookup with bounds-checking.
+ * (the lookup arrays above don't need to do bounds-checking) */
+ switch (name) {
+ case AWS_HTTP_HEADER_METHOD:
+ return PSEUDOHEADER_METHOD;
+ case AWS_HTTP_HEADER_SCHEME:
+ return PSEUDOHEADER_SCHEME;
+ case AWS_HTTP_HEADER_AUTHORITY:
+ return PSEUDOHEADER_AUTHORITY;
+ case AWS_HTTP_HEADER_PATH:
+ return PSEUDOHEADER_PATH;
+ case AWS_HTTP_HEADER_STATUS:
+ return PSEUDOHEADER_STATUS;
+ default:
+ return PSEUDOHEADER_UNKNOWN;
+ }
+}
+
+/***********************************************************************************************************************
+ * State Machine
+ **********************************************************************************************************************/
+
+typedef struct aws_h2err(state_fn)(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input);
+struct h2_decoder_state {
+ state_fn *fn;
+ uint32_t bytes_required;
+ const char *name;
+};
+
+#define DEFINE_STATE(_name, _bytes_required) \
+ static state_fn s_state_fn_##_name; \
+ enum { s_state_##_name##_requires_##_bytes_required##_bytes = _bytes_required }; \
+ static const struct h2_decoder_state s_state_##_name = { \
+ .fn = s_state_fn_##_name, \
+ .bytes_required = s_state_##_name##_requires_##_bytes_required##_bytes, \
+ .name = #_name, \
+ }
+
+/* Common states */
+DEFINE_STATE(prefix, 9);
+DEFINE_STATE(padding_len, 1);
+DEFINE_STATE(padding, 0);
+
+DEFINE_STATE(priority_block, 5);
+
+DEFINE_STATE(header_block_loop, 0);
+DEFINE_STATE(header_block_entry, 1); /* requires 1 byte, but may consume more */
+
+/* Frame-specific states */
+DEFINE_STATE(frame_data, 0);
+DEFINE_STATE(frame_headers, 0);
+DEFINE_STATE(frame_priority, 0);
+DEFINE_STATE(frame_rst_stream, 4);
+DEFINE_STATE(frame_settings_begin, 0);
+DEFINE_STATE(frame_settings_loop, 0);
+DEFINE_STATE(frame_settings_i, 6);
+DEFINE_STATE(frame_push_promise, 4);
+DEFINE_STATE(frame_ping, 8);
+DEFINE_STATE(frame_goaway, 8);
+DEFINE_STATE(frame_goaway_debug_data, 0);
+DEFINE_STATE(frame_window_update, 4);
+DEFINE_STATE(frame_continuation, 0);
+DEFINE_STATE(frame_unknown, 0);
+
+/* States that have nothing to do with frames */
+DEFINE_STATE(connection_preface_string, 1); /* requires 1 byte but may consume more */
+
+/* Helper for states that need to transition to frame-type states */
+static const struct h2_decoder_state *s_state_frames[AWS_H2_FRAME_TYPE_COUNT] = {
+ [AWS_H2_FRAME_T_DATA] = &s_state_frame_data,
+ [AWS_H2_FRAME_T_HEADERS] = &s_state_frame_headers,
+ [AWS_H2_FRAME_T_PRIORITY] = &s_state_frame_priority,
+ [AWS_H2_FRAME_T_RST_STREAM] = &s_state_frame_rst_stream,
+ [AWS_H2_FRAME_T_SETTINGS] = &s_state_frame_settings_begin,
+ [AWS_H2_FRAME_T_PUSH_PROMISE] = &s_state_frame_push_promise,
+ [AWS_H2_FRAME_T_PING] = &s_state_frame_ping,
+ [AWS_H2_FRAME_T_GOAWAY] = &s_state_frame_goaway,
+ [AWS_H2_FRAME_T_WINDOW_UPDATE] = &s_state_frame_window_update,
+ [AWS_H2_FRAME_T_CONTINUATION] = &s_state_frame_continuation,
+ [AWS_H2_FRAME_T_UNKNOWN] = &s_state_frame_unknown,
+};
+
+/***********************************************************************************************************************
+ * Struct
+ **********************************************************************************************************************/
+
/* A single HTTP/2 decoder instance: current state-machine position plus all partially
 * decoded frame / header-block / GOAWAY data carried between calls to aws_h2_decode(). */
struct aws_h2_decoder {
    /* Implementation data. */
    struct aws_allocator *alloc;
    const void *logging_id;
    struct aws_hpack_decoder hpack;
    bool is_server;
    /* Staging buffer (s_scratch_space_size bytes) for states with bytes_required > 0 */
    struct aws_byte_buf scratch;
    const struct h2_decoder_state *state;
    /* NOTE(review): presumably set when a state function transitions decoder->state,
     * so the decode loop knows the scratch requirements changed -- confirm in aws_h2_decode() */
    bool state_changed;

    /* HTTP/2 connection preface must be first thing received (RFC-7540 3.5):
     * Server must receive (client must send): magic string, then SETTINGS frame.
     * Client must receive (server must send): SETTINGS frame. */
    bool connection_preface_complete;

    /* Cursor over the canonical client connection preface string */
    struct aws_byte_cursor connection_preface_cursor;

    /* Frame-in-progress */
    struct aws_frame_in_progress {
        enum aws_h2_frame_type type;
        uint32_t stream_id;
        uint32_t payload_len;
        uint8_t padding_len;

        struct {
            bool ack;
            bool end_stream;
            bool end_headers;
            bool priority;
        } flags;
    } frame_in_progress;

    /* GOAWAY buffer */
    struct aws_goaway_in_progress {
        uint32_t last_stream;
        uint32_t error_code;
        /* Buffer of the received debug data in the latest goaway frame */
        struct aws_byte_buf debug_data;
    } goaway_in_progress;

    /* A header-block starts with a HEADERS or PUSH_PROMISE frame, followed by 0 or more CONTINUATION frames.
     * It's an error for any other frame-type or stream ID to arrive while a header-block is in progress.
     * The header-block ends when a frame has the END_HEADERS flag set. (RFC-7540 4.3) */
    struct aws_header_block_in_progress {
        /* If 0, then no header-block in progress */
        uint32_t stream_id;

        /* Whether these are informational (1xx), normal, or trailing headers */
        enum aws_http_header_block block_type;

        /* Buffer up pseudo-headers and deliver them once they're all validated */
        struct aws_string *pseudoheader_values[PSEUDOHEADER_COUNT];
        enum aws_http_header_compression pseudoheader_compression[PSEUDOHEADER_COUNT];

        /* All pseudo-header fields MUST appear in the header block before regular header fields. */
        bool pseudoheaders_done;

        /* T: PUSH_PROMISE header-block
         * F: HEADERS header-block */
        bool is_push_promise;

        /* If frame that starts header-block has END_STREAM flag,
         * then frame that ends header-block also ends the stream. */
        bool ends_stream;

        /* True if something occurs that makes the header-block malformed (ex: invalid header name).
         * A malformed header-block is not a connection error, it's a Stream Error (RFC-7540 5.4.2).
         * We continue decoding and report that it's malformed in on_headers_end(). */
        bool malformed;

        /* NOTE(review): set outside this view -- presumably "this message must not have
         * body-related headers"; confirm at the site that sets it */
        bool body_headers_forbidden;

        /* Buffer up cookie header fields to concatenate separate ones */
        struct aws_byte_buf cookies;
        /* If separate cookie fields have different compression types, the concatenated cookie uses the strictest type.
         */
        enum aws_http_header_compression cookie_header_compression_type;
    } header_block_in_progress;

    /* Settings for decoder, which is based on the settings sent to the peer and ACKed by peer */
    struct {
        /* enable/disable server push */
        uint32_t enable_push;
        /* the size of the largest frame payload */
        uint32_t max_frame_size;
    } settings;

    /* Dynamic array of aws_http2_setting (initialized in aws_h2_decoder_new) */
    struct aws_array_list settings_buffer_list;

    /* User callbacks and settings. */
    const struct aws_h2_decoder_vtable *vtable;
    void *userdata;

    /* If this is set to true, decode may no longer be called */
    bool has_errored;
};
+
+/***********************************************************************************************************************/
+
/* Create a decoder; returns NULL on failure.
 * The decoder struct and its scratch space come from one allocation (released together).
 * A server decoder (unless params->skip_connection_preface) first expects the client's
 * connection-preface magic string; otherwise decoding starts at the frame prefix. */
struct aws_h2_decoder *aws_h2_decoder_new(struct aws_h2_decoder_params *params) {
    AWS_PRECONDITION(params);
    AWS_PRECONDITION(params->alloc);
    AWS_PRECONDITION(params->vtable);

    struct aws_h2_decoder *decoder = NULL;
    void *scratch_buf = NULL;

    /* One allocation carved into the decoder struct plus its scratch space */
    void *allocation = aws_mem_acquire_many(
        params->alloc, 2, &decoder, sizeof(struct aws_h2_decoder), &scratch_buf, s_scratch_space_size);
    if (!allocation) {
        goto error;
    }

    AWS_ZERO_STRUCT(*decoder);
    decoder->alloc = params->alloc;
    decoder->vtable = params->vtable;
    decoder->userdata = params->userdata;
    decoder->logging_id = params->logging_id;
    decoder->is_server = params->is_server;
    /* skipping the preface means we treat it as already complete */
    decoder->connection_preface_complete = params->skip_connection_preface;

    decoder->scratch = aws_byte_buf_from_empty_array(scratch_buf, s_scratch_space_size);

    aws_hpack_decoder_init(&decoder->hpack, params->alloc, decoder);

    if (decoder->is_server && !params->skip_connection_preface) {
        decoder->state = &s_state_connection_preface_string;
        decoder->connection_preface_cursor = aws_h2_connection_preface_client_string;
    } else {
        decoder->state = &s_state_prefix;
    }

    /* Start from the RFC-defined initial settings until the peer ACKs ours */
    decoder->settings.enable_push = aws_h2_settings_initial[AWS_HTTP2_SETTINGS_ENABLE_PUSH];
    decoder->settings.max_frame_size = aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE];

    if (aws_array_list_init_dynamic(
            &decoder->settings_buffer_list, decoder->alloc, 0, sizeof(struct aws_http2_setting))) {
        goto error;
    }

    if (aws_byte_buf_init(
            &decoder->header_block_in_progress.cookies, decoder->alloc, s_decoder_cookie_buffer_initial_size)) {
        goto error;
    }

    return decoder;

error:
    /* NOTE(review): members not yet initialized are zeroed by AWS_ZERO_STRUCT above --
     * presumably the clean-up fns tolerate zeroed state per aws-c-common convention; confirm */
    if (decoder) {
        aws_hpack_decoder_clean_up(&decoder->hpack);
        aws_array_list_clean_up(&decoder->settings_buffer_list);
        aws_byte_buf_clean_up(&decoder->header_block_in_progress.cookies);
    }
    aws_mem_release(params->alloc, allocation);
    return NULL;
}
+
+/* Clear per-header-block state between header blocks.
+ * The cookie buffer's backing allocation is preserved (and merely reset)
+ * so it can be reused by the next header block. */
+static void s_reset_header_block_in_progress(struct aws_h2_decoder *decoder) {
+    for (size_t i = 0; i < PSEUDOHEADER_COUNT; ++i) {
+        aws_string_destroy(decoder->header_block_in_progress.pseudoheader_values[i]);
+    }
+    /* Save the cookie buffer so AWS_ZERO_STRUCT doesn't orphan its backing memory */
+    struct aws_byte_buf cookie_backup = decoder->header_block_in_progress.cookies;
+    AWS_ZERO_STRUCT(decoder->header_block_in_progress);
+    decoder->header_block_in_progress.cookies = cookie_backup;
+    aws_byte_buf_reset(&decoder->header_block_in_progress.cookies, false);
+}
+
+/* Release a decoder created by aws_h2_decoder_new(). NULL is a no-op. */
+void aws_h2_decoder_destroy(struct aws_h2_decoder *decoder) {
+    if (!decoder) {
+        return;
+    }
+    aws_array_list_clean_up(&decoder->settings_buffer_list);
+    aws_hpack_decoder_clean_up(&decoder->hpack);
+    /* Frees buffered pseudo-header strings; the cookie buffer itself is freed below */
+    s_reset_header_block_in_progress(decoder);
+    aws_byte_buf_clean_up(&decoder->header_block_in_progress.cookies);
+    aws_byte_buf_clean_up(&decoder->goaway_in_progress.debug_data);
+    /* Decoder and scratch share one allocation, so one release frees both */
+    aws_mem_release(decoder->alloc, decoder);
+}
+
+/* Feed bytes into the decoder, consuming from `data` and advancing the
+ * state machine. Decode callbacks (vtable) fire synchronously from within
+ * this call. On failure the decoder is poisoned (has_errored) and must
+ * never be passed to aws_h2_decode() again. */
+struct aws_h2err aws_h2_decode(struct aws_h2_decoder *decoder, struct aws_byte_cursor *data) {
+    AWS_PRECONDITION(decoder);
+    AWS_PRECONDITION(data);
+
+    AWS_FATAL_ASSERT(!decoder->has_errored);
+
+    struct aws_h2err err = AWS_H2ERR_SUCCESS;
+
+    /* Run decoder state machine until we're no longer changing states.
+     * We don't simply loop `while(data->len)` because some states consume no data,
+     * and these states should run even when there is no data left. */
+    do {
+        decoder->state_changed = false;
+
+        const uint32_t bytes_required = decoder->state->bytes_required;
+        AWS_ASSERT(bytes_required <= decoder->scratch.capacity);
+        const char *current_state_name = decoder->state->name;
+        const size_t prev_data_len = data->len;
+        (void)prev_data_len;
+
+        if (!decoder->scratch.len && data->len >= bytes_required) {
+            /* Easy case, there is no scratch and we have enough data, so just send it to the state */
+
+            DECODER_LOGF(TRACE, decoder, "Running state '%s' with %zu bytes available", current_state_name, data->len);
+
+            err = decoder->state->fn(decoder, data);
+            if (aws_h2err_failed(err)) {
+                goto handle_error;
+            }
+
+            AWS_ASSERT(prev_data_len - data->len >= bytes_required && "Decoder state requested more data than it used");
+        } else {
+            /* Otherwise, state requires a minimum amount of data and we have to use the scratch */
+            size_t bytes_to_read = bytes_required - decoder->scratch.len;
+            bool will_finish_state = true;
+
+            if (bytes_to_read > data->len) {
+                /* Not enough in this cursor, need to read as much as possible and then come back */
+                bytes_to_read = data->len;
+                will_finish_state = false;
+            }
+
+            if (AWS_LIKELY(bytes_to_read)) {
+                /* Read the appropriate number of bytes into scratch */
+                struct aws_byte_cursor to_read = aws_byte_cursor_advance(data, bytes_to_read);
+                bool succ = aws_byte_buf_write_from_whole_cursor(&decoder->scratch, to_read);
+                AWS_ASSERT(succ);
+                (void)succ;
+            }
+
+            /* If we have the correct number of bytes, call the state */
+            if (will_finish_state) {
+
+                DECODER_LOGF(TRACE, decoder, "Running state '%s' (using scratch)", current_state_name);
+
+                struct aws_byte_cursor state_data = aws_byte_cursor_from_buf(&decoder->scratch);
+                err = decoder->state->fn(decoder, &state_data);
+                if (aws_h2err_failed(err)) {
+                    goto handle_error;
+                }
+
+                AWS_ASSERT(state_data.len == 0 && "Decoder state requested more data than it used");
+            } else {
+                DECODER_LOGF(
+                    TRACE,
+                    decoder,
+                    "State '%s' requires %" PRIu32 " bytes, but only %zu available, trying again later",
+                    current_state_name,
+                    bytes_required,
+                    decoder->scratch.len);
+            }
+        }
+    } while (decoder->state_changed);
+
+    return AWS_H2ERR_SUCCESS;
+
+handle_error:
+    decoder->has_errored = true;
+    return err;
+}
+
+/***********************************************************************************************************************
+ * State functions
+ **********************************************************************************************************************/
+
+/* Transition to `state` (one of the static s_state_* descriptors), first
+ * verifying the in-progress frame still has enough payload for that state's
+ * fixed-size read. Also clears scratch so the next state starts fresh. */
+static struct aws_h2err s_decoder_switch_state(struct aws_h2_decoder *decoder, const struct h2_decoder_state *state) {
+    /* Ensure payload is big enough to enter next state.
+     * If this fails, then the payload length we received is too small for this frame type.
+     * (ex: a RST_STREAM frame with < 4 bytes) */
+    if (decoder->frame_in_progress.payload_len < state->bytes_required) {
+        DECODER_LOGF(
+            ERROR, decoder, "%s payload is too small", aws_h2_frame_type_to_str(decoder->frame_in_progress.type));
+        return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR);
+    }
+
+    DECODER_LOGF(TRACE, decoder, "Moving from state '%s' to '%s'", decoder->state->name, state->name);
+    decoder->scratch.len = 0;
+    decoder->state = state;
+    decoder->state_changed = true;
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Jump to the payload-handling state registered for the current frame's type. */
+static struct aws_h2err s_decoder_switch_to_frame_state(struct aws_h2_decoder *decoder) {
+    AWS_ASSERT(decoder->frame_in_progress.type < AWS_H2_FRAME_TYPE_COUNT);
+    return s_decoder_switch_state(decoder, s_state_frames[decoder->frame_in_progress.type]);
+}
+
+/* Finish the current frame and return to the prefix state, ready for the
+ * next frame header. Errors if the frame declared more payload than its
+ * type actually uses. */
+static struct aws_h2err s_decoder_reset_state(struct aws_h2_decoder *decoder) {
+    /* Ensure we've consumed all payload (and padding) when state machine finishes this frame.
+     * If this fails, the payload length we received is too large for this frame type.
+     * (ex: a RST_STREAM frame with > 4 bytes) */
+    if (decoder->frame_in_progress.payload_len > 0 || decoder->frame_in_progress.padding_len > 0) {
+        DECODER_LOGF(
+            ERROR, decoder, "%s frame payload is too large", aws_h2_frame_type_to_str(decoder->frame_in_progress.type));
+        return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR);
+    }
+
+    DECODER_LOGF(TRACE, decoder, "%s frame complete", aws_h2_frame_type_to_str(decoder->frame_in_progress.type));
+
+    decoder->scratch.len = 0;
+    decoder->state = &s_state_prefix;
+    decoder->state_changed = true;
+
+    AWS_ZERO_STRUCT(decoder->frame_in_progress);
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Returns as much of the current frame's payload as possible, and updates payload_len */
+static struct aws_byte_cursor s_decoder_get_payload(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+    struct aws_byte_cursor result;
+
+    const uint32_t remaining_length = decoder->frame_in_progress.payload_len;
+    if (input->len < remaining_length) {
+        /* Input is smaller than remaining payload; the assert guards the
+         * narrowing cast below (result.len == input->len in this branch) */
+        AWS_ASSERT(input->len <= UINT32_MAX);
+        result = aws_byte_cursor_advance(input, input->len);
+    } else {
+        result = aws_byte_cursor_advance(input, remaining_length);
+    }
+
+    decoder->frame_in_progress.payload_len -= (uint32_t)result.len;
+
+    return result;
+}
+
+/* clang-format off */
+
+/* Mask of flags supported by each frame type (per-type flag definitions, RFC-7540 section 6).
+ * Frames not listed have mask of 0, which means all flags will be ignored.
+ * (Designated initializers zero any unlisted entry.) */
+static const uint8_t s_acceptable_flags_for_frame[AWS_H2_FRAME_TYPE_COUNT] = {
+    [AWS_H2_FRAME_T_DATA]           = AWS_H2_FRAME_F_END_STREAM | AWS_H2_FRAME_F_PADDED,
+    [AWS_H2_FRAME_T_HEADERS]        = AWS_H2_FRAME_F_END_STREAM | AWS_H2_FRAME_F_END_HEADERS |
+                                      AWS_H2_FRAME_F_PADDED | AWS_H2_FRAME_F_PRIORITY,
+    [AWS_H2_FRAME_T_PRIORITY]       = 0,
+    [AWS_H2_FRAME_T_RST_STREAM]     = 0,
+    [AWS_H2_FRAME_T_SETTINGS]       = AWS_H2_FRAME_F_ACK,
+    [AWS_H2_FRAME_T_PUSH_PROMISE]   = AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_PADDED,
+    [AWS_H2_FRAME_T_PING]           = AWS_H2_FRAME_F_ACK,
+    [AWS_H2_FRAME_T_GOAWAY]         = 0,
+    [AWS_H2_FRAME_T_WINDOW_UPDATE]  = 0,
+    [AWS_H2_FRAME_T_CONTINUATION]   = AWS_H2_FRAME_F_END_HEADERS,
+    [AWS_H2_FRAME_T_UNKNOWN]        = 0,
+};
+
+/* Whether a frame type's stream identifier must be non-zero, must be zero,
+ * or may legally be either. */
+enum stream_id_rules {
+    STREAM_ID_REQUIRED,
+    STREAM_ID_FORBIDDEN,
+    STREAM_ID_EITHER_WAY,
+};
+
+/* Frame-types generally either require a stream-id, or require that it be zero. */
+static const enum stream_id_rules s_stream_id_rules_for_frame[AWS_H2_FRAME_TYPE_COUNT] = {
+    [AWS_H2_FRAME_T_DATA]           = STREAM_ID_REQUIRED,
+    [AWS_H2_FRAME_T_HEADERS]        = STREAM_ID_REQUIRED,
+    [AWS_H2_FRAME_T_PRIORITY]       = STREAM_ID_REQUIRED,
+    [AWS_H2_FRAME_T_RST_STREAM]     = STREAM_ID_REQUIRED,
+    [AWS_H2_FRAME_T_SETTINGS]       = STREAM_ID_FORBIDDEN,
+    [AWS_H2_FRAME_T_PUSH_PROMISE]   = STREAM_ID_REQUIRED,
+    [AWS_H2_FRAME_T_PING]           = STREAM_ID_FORBIDDEN,
+    [AWS_H2_FRAME_T_GOAWAY]         = STREAM_ID_FORBIDDEN,
+    [AWS_H2_FRAME_T_WINDOW_UPDATE]  = STREAM_ID_EITHER_WAY, /* WINDOW_UPDATE is special and can do either */
+    [AWS_H2_FRAME_T_CONTINUATION]   = STREAM_ID_REQUIRED,
+    [AWS_H2_FRAME_T_UNKNOWN]        = STREAM_ID_EITHER_WAY, /* Everything in an UNKNOWN frame type is ignored */
+};
+/* clang-format on */
+
+/* All frames begin with a fixed 9-octet header followed by a variable-length payload. (RFC-7540 4.1)
+ * This function processes everything preceding Frame Payload in the following diagram:
+ * +-----------------------------------------------+
+ * | Length (24) |
+ * +---------------+---------------+---------------+
+ * | Type (8) | Flags (8) |
+ * +-+-------------+---------------+-------------------------------+
+ * |R| Stream Identifier (31) |
+ * +=+=============================================================+
+ * | Frame Payload (0...) ...
+ * +---------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_prefix(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+    AWS_ASSERT(input->len >= s_state_prefix_requires_9_bytes);
+
+    struct aws_frame_in_progress *frame = &decoder->frame_in_progress;
+    uint8_t raw_type = 0;
+    uint8_t raw_flags = 0;
+
+    /* Read the raw values from the first 9 bytes */
+    bool all_read = true;
+    all_read &= aws_byte_cursor_read_be24(input, &frame->payload_len);
+    all_read &= aws_byte_cursor_read_u8(input, &raw_type);
+    all_read &= aws_byte_cursor_read_u8(input, &raw_flags);
+    all_read &= aws_byte_cursor_read_be32(input, &frame->stream_id);
+    AWS_ASSERT(all_read);
+    (void)all_read;
+
+    /* Validate frame type (unrecognized types collapse into UNKNOWN and are later discarded) */
+    frame->type = raw_type < AWS_H2_FRAME_T_UNKNOWN ? raw_type : AWS_H2_FRAME_T_UNKNOWN;
+
+    /* Validate the frame's flags
+     * Flags that have no defined semantics for a particular frame type MUST be ignored (RFC-7540 4.1) */
+    const uint8_t flags = raw_flags & s_acceptable_flags_for_frame[decoder->frame_in_progress.type];
+
+    bool is_padded = flags & AWS_H2_FRAME_F_PADDED;
+    decoder->frame_in_progress.flags.ack = flags & AWS_H2_FRAME_F_ACK;
+    decoder->frame_in_progress.flags.end_stream = flags & AWS_H2_FRAME_F_END_STREAM;
+    decoder->frame_in_progress.flags.end_headers = flags & AWS_H2_FRAME_F_END_HEADERS;
+    /* PRIORITY frames carry priority fields implicitly, without the flag being set */
+    decoder->frame_in_progress.flags.priority =
+        flags & AWS_H2_FRAME_F_PRIORITY || decoder->frame_in_progress.type == AWS_H2_FRAME_T_PRIORITY;
+
+    /* Connection preface requires that SETTINGS be sent first (RFC-7540 3.5).
+     * This should be the first error we check for, so that a connection sending
+     * total garbage data is likely to trigger this PROTOCOL_ERROR */
+    if (!decoder->connection_preface_complete) {
+        if (frame->type == AWS_H2_FRAME_T_SETTINGS && !frame->flags.ack) {
+            DECODER_LOG(TRACE, decoder, "Connection preface satisfied.");
+            decoder->connection_preface_complete = true;
+        } else {
+            DECODER_LOG(ERROR, decoder, "First frame must be SETTINGS");
+            return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+        }
+    }
+
+    /* Validate the frame's stream ID. */
+
+    /* Reserved bit (1st bit) MUST be ignored when receiving (RFC-7540 4.1) */
+    frame->stream_id &= s_31_bit_mask;
+
+    /* Some frame types require a stream ID, some frame types require that stream ID be zero. */
+    const enum stream_id_rules stream_id_rules = s_stream_id_rules_for_frame[frame->type];
+    if (frame->stream_id) {
+        if (stream_id_rules == STREAM_ID_FORBIDDEN) {
+            DECODER_LOGF(ERROR, decoder, "Stream ID for %s frame must be 0.", aws_h2_frame_type_to_str(frame->type));
+            return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+        }
+    } else {
+        if (stream_id_rules == STREAM_ID_REQUIRED) {
+            DECODER_LOGF(ERROR, decoder, "Stream ID for %s frame cannot be 0.", aws_h2_frame_type_to_str(frame->type));
+            return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+        }
+    }
+
+    /* A header-block starts with a HEADERS or PUSH_PROMISE frame, followed by 0 or more CONTINUATION frames.
+     * It's an error for any other frame-type or stream ID to arrive while a header-block is in progress.
+     * (RFC-7540 4.3) */
+    if (frame->type == AWS_H2_FRAME_T_CONTINUATION) {
+        if (decoder->header_block_in_progress.stream_id != frame->stream_id) {
+            DECODER_LOG(ERROR, decoder, "Unexpected CONTINUATION frame.");
+            return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+        }
+    } else {
+        if (decoder->header_block_in_progress.stream_id) {
+            DECODER_LOG(ERROR, decoder, "Expected CONTINUATION frame.");
+            return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+        }
+    }
+
+    /* Validate payload length against the decoder's current MAX_FRAME_SIZE setting. */
+    uint32_t max_frame_size = decoder->settings.max_frame_size;
+    if (frame->payload_len > max_frame_size) {
+        DECODER_LOGF(
+            ERROR,
+            decoder,
+            "Decoder's max frame size is %" PRIu32 ", but frame of size %" PRIu32 " was received.",
+            max_frame_size,
+            frame->payload_len);
+        return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR);
+    }
+
+    DECODER_LOGF(
+        TRACE,
+        decoder,
+        "Done decoding frame prefix (type=%s stream-id=%" PRIu32 " payload-len=%" PRIu32 "), moving on to payload",
+        aws_h2_frame_type_to_str(frame->type),
+        frame->stream_id,
+        frame->payload_len);
+
+    if (is_padded) {
+        /* Read padding length if necessary */
+        return s_decoder_switch_state(decoder, &s_state_padding_len);
+    }
+    if (decoder->frame_in_progress.type == AWS_H2_FRAME_T_DATA) {
+        /* We invoke the on_data_begin here to report the whole payload size */
+        DECODER_CALL_VTABLE_STREAM_ARGS(
+            decoder, on_data_begin, frame->payload_len, 0 /*padding_len*/, frame->flags.end_stream);
+    }
+    if (decoder->frame_in_progress.flags.priority) {
+        /* Read the stream dependency and weight if PRIORITY is set */
+        return s_decoder_switch_state(decoder, &s_state_priority_block);
+    }
+
+    /* Set the state to the appropriate frame's state */
+    return s_decoder_switch_to_frame_state(decoder);
+}
+
+/* Frames that support padding, and have the PADDED flag set, begin with a 1-byte Pad Length.
+ * (Actual padding comes later at the very end of the frame)
+ * +---------------+
+ * |Pad Length? (8)|
+ * +---------------+
+ */
+static struct aws_h2err s_state_fn_padding_len(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+    AWS_ASSERT(input->len >= s_state_padding_len_requires_1_bytes);
+
+    struct aws_frame_in_progress *frame = &decoder->frame_in_progress;
+    /* Read the padding length */
+    bool succ = aws_byte_cursor_read_u8(input, &frame->padding_len);
+    AWS_ASSERT(succ);
+    (void)succ;
+
+    /* Adjust payload size so it doesn't include padding (or the 1-byte padding length) */
+    uint32_t reduce_payload = s_state_padding_len_requires_1_bytes + frame->padding_len;
+    if (reduce_payload > decoder->frame_in_progress.payload_len) {
+        DECODER_LOG(ERROR, decoder, "Padding length exceeds payload length");
+        return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+    }
+
+    if (frame->type == AWS_H2_FRAME_T_DATA) {
+        /* We invoke the on_data_begin here to report the whole payload size and the padding size.
+         * The +1 accounts for the pad-length byte itself. */
+        DECODER_CALL_VTABLE_STREAM_ARGS(
+            decoder, on_data_begin, frame->payload_len, frame->padding_len + 1, frame->flags.end_stream);
+    }
+
+    frame->payload_len -= reduce_payload;
+
+    DECODER_LOGF(TRACE, decoder, "Padding length of frame: %" PRIu32, frame->padding_len);
+    if (frame->flags.priority) {
+        /* Read the stream dependency and weight if PRIORITY is set */
+        return s_decoder_switch_state(decoder, &s_state_priority_block);
+    }
+
+    /* Set the state to the appropriate frame's state */
+    return s_decoder_switch_to_frame_state(decoder);
+}
+
+/* Discard the frame's trailing padding bytes. May be entered repeatedly
+ * across calls until all padding has arrived. */
+static struct aws_h2err s_state_fn_padding(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+    const uint8_t remaining_len = decoder->frame_in_progress.padding_len;
+    const uint8_t consuming_len = input->len < remaining_len ? (uint8_t)input->len : remaining_len;
+    aws_byte_cursor_advance(input, consuming_len);
+    decoder->frame_in_progress.padding_len -= consuming_len;
+
+    if (remaining_len == consuming_len) {
+        /* Done with the frame! */
+        return s_decoder_reset_state(decoder);
+    }
+
+    /* More padding still in flight; wait for more input */
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Shared code for:
+ * PRIORITY frame (RFC-7540 6.3)
+ * Start of HEADERS frame IF the priority flag is set (RFC-7540 6.2)
+ * +-+-------------+-----------------------------------------------+
+ * |E| Stream Dependency (31) |
+ * +-+-------------+-----------------------------------------------+
+ * | Weight (8) |
+ * +-+-------------+-----------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_priority_block(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+    AWS_ASSERT(input->len >= s_state_priority_block_requires_5_bytes);
+
+    /* #NOTE: throw priority data on the GROUND. They make us hecka vulnerable to DDoS and stuff.
+     * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9513
+     */
+    aws_byte_cursor_advance(input, s_state_priority_block_requires_5_bytes);
+
+    decoder->frame_in_progress.payload_len -= s_state_priority_block_requires_5_bytes;
+
+    return s_decoder_switch_to_frame_state(decoder);
+}
+
+/* DATA frame payload: stream body bytes, delivered to the user incrementally
+ * as they arrive. Re-entered until the whole payload is consumed. */
+static struct aws_h2err s_state_fn_frame_data(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+    const struct aws_byte_cursor body_data = s_decoder_get_payload(decoder, input);
+
+    if (body_data.len) {
+        DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_data_i, body_data);
+    }
+
+    if (decoder->frame_in_progress.payload_len == 0) {
+        DECODER_CALL_VTABLE_STREAM(decoder, on_data_end);
+        /* If frame had END_STREAM flag, alert user now */
+        if (decoder->frame_in_progress.flags.end_stream) {
+            DECODER_CALL_VTABLE_STREAM(decoder, on_end_stream);
+        }
+
+        /* Process padding if necessary, otherwise we're done!
+         * (padding state resets the frame immediately when padding_len is 0) */
+        return s_decoder_switch_state(decoder, &s_state_padding);
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+/* HEADERS frame: begin a header-block on this stream, alert the user,
+ * then hand off to the shared header-block states. */
+static struct aws_h2err s_state_fn_frame_headers(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+    (void)input;
+
+    /* Start header-block and alert the user */
+    decoder->header_block_in_progress.stream_id = decoder->frame_in_progress.stream_id;
+    decoder->header_block_in_progress.is_push_promise = false;
+    decoder->header_block_in_progress.ends_stream = decoder->frame_in_progress.flags.end_stream;
+
+    DECODER_CALL_VTABLE_STREAM(decoder, on_headers_begin);
+
+    /* Read the header-block fragment */
+    return s_decoder_switch_state(decoder, &s_state_header_block_loop);
+}
+/* PRIORITY frame: its whole payload was consumed by the shared priority_block state. */
+static struct aws_h2err s_state_fn_frame_priority(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+    (void)input;
+
+    /* We already processed this data in the shared priority_block state, so we're done! */
+    return s_decoder_reset_state(decoder);
+}
+
+/* RST_STREAM is just a 4-byte error code.
+ * +---------------------------------------------------------------+
+ * | Error Code (32) |
+ * +---------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_rst_stream(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+    AWS_ASSERT(input->len >= s_state_frame_rst_stream_requires_4_bytes);
+
+    uint32_t error_code = 0;
+    bool succ = aws_byte_cursor_read_be32(input, &error_code);
+    AWS_ASSERT(succ);
+    (void)succ;
+
+    decoder->frame_in_progress.payload_len -= s_state_frame_rst_stream_requires_4_bytes;
+
+    DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_rst_stream, error_code);
+
+    return s_decoder_reset_state(decoder);
+}
+
+/* A SETTINGS frame may contain any number of 6-byte entries.
+ * This state consumes no data, but sends us into the appropriate next state */
+static struct aws_h2err s_state_fn_frame_settings_begin(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+    (void)input;
+
+    /* If ack is set, report and we're done */
+    if (decoder->frame_in_progress.flags.ack) {
+        /* Receipt of a SETTINGS frame with the ACK flag set and a length field value other
+         * than 0 MUST be treated as a connection error of type FRAME_SIZE_ERROR */
+        if (decoder->frame_in_progress.payload_len) {
+            DECODER_LOGF(
+                ERROR,
+                decoder,
+                "SETTINGS ACK frame received, but it has non-0 payload length %" PRIu32,
+                decoder->frame_in_progress.payload_len);
+            return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR);
+        }
+
+        DECODER_CALL_VTABLE(decoder, on_settings_ack);
+        return s_decoder_reset_state(decoder);
+    }
+
+    if (decoder->frame_in_progress.payload_len % s_state_frame_settings_i_requires_6_bytes != 0) {
+        /* A SETTINGS frame with a length other than a multiple of 6 octets MUST be
+         * treated as a connection error (Section 5.4.1) of type FRAME_SIZE_ERROR */
+        DECODER_LOGF(
+            ERROR,
+            decoder,
+            "Settings frame payload length is %" PRIu32 ", but it must be divisible by %" PRIu32,
+            decoder->frame_in_progress.payload_len,
+            s_state_frame_settings_i_requires_6_bytes);
+        return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR);
+    }
+
+    /* Enter looping states until all entries are consumed. */
+    return s_decoder_switch_state(decoder, &s_state_frame_settings_loop);
+}
+
+/* Check if we're done consuming settings.
+ * Fires the on_settings callback once with every entry buffered so far. */
+static struct aws_h2err s_state_fn_frame_settings_loop(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+    (void)input;
+
+    if (decoder->frame_in_progress.payload_len == 0) {
+        /* Huzzah, done with the frame, fire the callback */
+        struct aws_array_list *buffer = &decoder->settings_buffer_list;
+        DECODER_CALL_VTABLE_ARGS(
+            decoder, on_settings, buffer->data, aws_array_list_length(&decoder->settings_buffer_list));
+        /* clean up the buffer (length reset; allocation kept for reuse) */
+        aws_array_list_clear(&decoder->settings_buffer_list);
+        return s_decoder_reset_state(decoder);
+    }
+
+    return s_decoder_switch_state(decoder, &s_state_frame_settings_i);
+}
+
+/* Each run through this state consumes one 6-byte setting.
+ * There may be multiple settings in a SETTINGS frame.
+ * +-------------------------------+
+ * | Identifier (16) |
+ * +-------------------------------+-------------------------------+
+ * | Value (32) |
+ * +---------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_settings_i(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+    AWS_ASSERT(input->len >= s_state_frame_settings_i_requires_6_bytes);
+
+    uint16_t id = 0;
+    uint32_t value = 0;
+
+    bool succ = aws_byte_cursor_read_be16(input, &id);
+    AWS_ASSERT(succ);
+    (void)succ;
+
+    succ = aws_byte_cursor_read_be32(input, &value);
+    AWS_ASSERT(succ);
+    (void)succ;
+
+    /* An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier MUST ignore that setting.
+     * RFC-7540 6.5.2 */
+    if (id >= AWS_HTTP2_SETTINGS_BEGIN_RANGE && id < AWS_HTTP2_SETTINGS_END_RANGE) {
+        /* check the value meets the settings bounds */
+        if (value < aws_h2_settings_bounds[id][0] || value > aws_h2_settings_bounds[id][1]) {
+            DECODER_LOGF(
+                ERROR, decoder, "A value of SETTING frame is invalid, id: %" PRIu16 ", value: %" PRIu32, id, value);
+            /* INITIAL_WINDOW_SIZE out of bounds is FLOW_CONTROL_ERROR; all others are PROTOCOL_ERROR */
+            if (id == AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE) {
+                return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR);
+            } else {
+                return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+            }
+        }
+        struct aws_http2_setting setting;
+        setting.id = id;
+        setting.value = value;
+        /* array_list will keep a copy of setting, it is fine to be a local variable */
+        if (aws_array_list_push_back(&decoder->settings_buffer_list, &setting)) {
+            DECODER_LOGF(ERROR, decoder, "Writing setting to buffer failed, %s", aws_error_name(aws_last_error()));
+            return aws_h2err_from_last_error();
+        }
+    }
+
+    /* Update payload len */
+    decoder->frame_in_progress.payload_len -= s_state_frame_settings_i_requires_6_bytes;
+
+    return s_decoder_switch_state(decoder, &s_state_frame_settings_loop);
+}
+
+/* Read 4-byte Promised Stream ID.
+ * The rest of the frame is just like HEADERS, so move on to shared states...
+ * +-+-------------------------------------------------------------+
+ * |R| Promised Stream ID (31) |
+ * +-+-----------------------------+-------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_push_promise(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+    if (decoder->settings.enable_push == 0) {
+        /* treat the receipt of a PUSH_PROMISE frame as a connection error of type PROTOCOL_ERROR.(RFC-7540 6.5.2) */
+        /* (fixed typo in log message: "seting" -> "setting") */
+        DECODER_LOG(ERROR, decoder, "PUSH_PROMISE is invalid, the setting for enable push is 0");
+        return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+    }
+
+    AWS_ASSERT(input->len >= s_state_frame_push_promise_requires_4_bytes);
+
+    uint32_t promised_stream_id = 0;
+    bool succ = aws_byte_cursor_read_be32(input, &promised_stream_id);
+    AWS_ASSERT(succ);
+    (void)succ;
+
+    decoder->frame_in_progress.payload_len -= s_state_frame_push_promise_requires_4_bytes;
+
+    /* Reserved bit (top bit) must be ignored when receiving (RFC-7540 4.1) */
+    promised_stream_id &= s_31_bit_mask;
+
+    /* Promised stream ID must not be 0 (RFC-7540 6.6).
+     * Promised stream ID (server-initiated) must be even-numbered (RFC-7540 5.1.1). */
+    if ((promised_stream_id == 0) || (promised_stream_id % 2) != 0) {
+        DECODER_LOGF(ERROR, decoder, "PUSH_PROMISE is promising invalid stream ID %" PRIu32, promised_stream_id);
+        return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+    }
+
+    /* Server cannot receive PUSH_PROMISE frames */
+    if (decoder->is_server) {
+        DECODER_LOG(ERROR, decoder, "Server cannot receive PUSH_PROMISE frames");
+        return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+    }
+
+    /* Start header-block and alert the user. */
+    decoder->header_block_in_progress.stream_id = decoder->frame_in_progress.stream_id;
+    decoder->header_block_in_progress.is_push_promise = true;
+    decoder->header_block_in_progress.ends_stream = false;
+
+    DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_begin, promised_stream_id);
+
+    /* Read the header-block fragment */
+    return s_decoder_switch_state(decoder, &s_state_header_block_loop);
+}
+
+/* PING frame is just 8-bytes of opaque data.
+ * +---------------------------------------------------------------+
+ * | |
+ * | Opaque Data (64) |
+ * | |
+ * +---------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_ping(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+    AWS_ASSERT(input->len >= s_state_frame_ping_requires_8_bytes);
+
+    /* NOTE(review): assumes AWS_HTTP2_PING_DATA_SIZE equals
+     * s_state_frame_ping_requires_8_bytes (8) -- confirm in headers */
+    uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE] = {0};
+    bool succ = aws_byte_cursor_read(input, &opaque_data, AWS_HTTP2_PING_DATA_SIZE);
+    AWS_ASSERT(succ);
+    (void)succ;
+
+    decoder->frame_in_progress.payload_len -= s_state_frame_ping_requires_8_bytes;
+
+    /* ACKed pings and fresh pings go to separate callbacks */
+    if (decoder->frame_in_progress.flags.ack) {
+        DECODER_CALL_VTABLE_ARGS(decoder, on_ping_ack, opaque_data);
+    } else {
+        DECODER_CALL_VTABLE_ARGS(decoder, on_ping, opaque_data);
+    }
+
+    return s_decoder_reset_state(decoder);
+}
+
+/* Read first 8 bytes of GOAWAY.
+ * This may be followed by N bytes of debug data.
+ * +-+-------------------------------------------------------------+
+ * |R| Last-Stream-ID (31) |
+ * +-+-------------------------------------------------------------+
+ * | Error Code (32) |
+ * +---------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_goaway(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+    AWS_ASSERT(input->len >= s_state_frame_goaway_requires_8_bytes);
+
+    uint32_t last_stream = 0;
+    uint32_t error_code = AWS_HTTP2_ERR_NO_ERROR;
+
+    bool succ = aws_byte_cursor_read_be32(input, &last_stream);
+    AWS_ASSERT(succ);
+    (void)succ;
+
+    /* Reserved bit (1st bit) MUST be ignored when receiving (RFC-7540 4.1) */
+    last_stream &= s_31_bit_mask;
+
+    succ = aws_byte_cursor_read_be32(input, &error_code);
+    AWS_ASSERT(succ);
+    (void)succ;
+
+    decoder->frame_in_progress.payload_len -= s_state_frame_goaway_requires_8_bytes;
+    uint32_t debug_data_length = decoder->frame_in_progress.payload_len;
+    /* Buffer up the debug data and invoke the callback once it has been decoded fully. */
+    decoder->goaway_in_progress.error_code = error_code;
+    decoder->goaway_in_progress.last_stream = last_stream;
+    /* FIX: allocation failure was previously only AWS_ASSERTed, which compiles out
+     * in release builds and would leave the buffer unusable; treat it as an error. */
+    if (aws_byte_buf_init(&decoder->goaway_in_progress.debug_data, decoder->alloc, debug_data_length)) {
+        return aws_h2err_from_last_error();
+    }
+
+    return s_decoder_switch_state(decoder, &s_state_frame_goaway_debug_data);
+}
+
+/* Optional remainder of GOAWAY frame.
+ * +---------------------------------------------------------------+
+ * | Additional Debug Data (*) |
+ * +---------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_goaway_debug_data(
+    struct aws_h2_decoder *decoder,
+    struct aws_byte_cursor *input) {
+
+    struct aws_byte_cursor debug_data = s_decoder_get_payload(decoder, input);
+    if (debug_data.len > 0) {
+        /* As we initialized the buffer to the size of debug data, we can safely append here
+         * (capacity suffices, so the ignored return value cannot indicate failure) */
+        aws_byte_buf_append(&decoder->goaway_in_progress.debug_data, &debug_data);
+    }
+
+    /* If this is the last data in the frame, fire the callback and reset decoder */
+    if (decoder->frame_in_progress.payload_len == 0) {
+        struct aws_byte_cursor debug_cursor = aws_byte_cursor_from_buf(&decoder->goaway_in_progress.debug_data);
+
+        DECODER_CALL_VTABLE_ARGS(
+            decoder,
+            on_goaway,
+            decoder->goaway_in_progress.last_stream,
+            decoder->goaway_in_progress.error_code,
+            debug_cursor);
+        aws_byte_buf_clean_up(&decoder->goaway_in_progress.debug_data);
+        return s_decoder_reset_state(decoder);
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* WINDOW_UPDATE frame.
+ * +-+-------------------------------------------------------------+
+ * |R| Window Size Increment (31) |
+ * +-+-------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_window_update(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+    AWS_ASSERT(input->len >= s_state_frame_window_update_requires_4_bytes);
+
+    uint32_t window_increment = 0;
+    bool succ = aws_byte_cursor_read_be32(input, &window_increment);
+    AWS_ASSERT(succ);
+    (void)succ;
+
+    decoder->frame_in_progress.payload_len -= s_state_frame_window_update_requires_4_bytes;
+
+    /* Reserved bit MUST be ignored when receiving (RFC-7540 4.1) */
+    window_increment &= s_31_bit_mask;
+
+    /* NOTE(review): an increment of 0 is a protocol error per RFC-7540 6.9;
+     * this decoder forwards the raw value -- presumably validated by the
+     * on_window_update handler. Confirm against the connection code. */
+    DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_window_update, window_increment);
+
+    return s_decoder_reset_state(decoder);
+}
+
+/* CONTINUATION is a lot like HEADERS, so it uses shared states. */
+static struct aws_h2err s_state_fn_frame_continuation(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+    (void)input;
+
+    /* Read the header-block fragment */
+    /* No CONTINUATION-specific payload precedes the fragment, so jump straight to the shared loop state. */
+    return s_decoder_switch_state(decoder, &s_state_header_block_loop);
+}
+
+/* Implementations MUST ignore and discard any frame that has a type that is unknown. */
+static struct aws_h2err s_state_fn_frame_unknown(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+    /* Read all data possible, and throw it on the floor */
+    s_decoder_get_payload(decoder, input);
+
+    /* If there's no more data expected, end the frame */
+    if (decoder->frame_in_progress.payload_len == 0) {
+        return s_decoder_reset_state(decoder);
+    }
+
+    /* Remainder of the unknown frame hasn't arrived yet; stay in this state. */
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Perform analysis that can't be done until all pseudo-headers are received.
+ * Then deliver buffered pseudoheaders via callback */
+static struct aws_h2err s_flush_pseudoheaders(struct aws_h2_decoder *decoder) {
+    struct aws_header_block_in_progress *current_block = &decoder->header_block_in_progress;
+
+    if (current_block->malformed) {
+        goto already_malformed;
+    }
+
+    /* Flush happens at most once per header-block; later calls are no-ops. */
+    if (current_block->pseudoheaders_done) {
+        return AWS_H2ERR_SUCCESS;
+    }
+    current_block->pseudoheaders_done = true;
+
+    /* s_process_header_field() already checked that we're not mixing request & response pseudoheaders */
+    bool has_request_pseudoheaders = false;
+    for (int i = PSEUDOHEADER_METHOD; i <= PSEUDOHEADER_PATH; ++i) {
+        if (current_block->pseudoheader_values[i] != NULL) {
+            has_request_pseudoheaders = true;
+            break;
+        }
+    }
+
+    bool has_response_pseudoheaders = current_block->pseudoheader_values[PSEUDOHEADER_STATUS] != NULL;
+
+    if (current_block->is_push_promise && !has_request_pseudoheaders) {
+        DECODER_LOG(ERROR, decoder, "PUSH_PROMISE is missing :method");
+        goto malformed;
+    }
+
+    if (has_request_pseudoheaders) {
+        /* Request header-block. */
+        current_block->block_type = AWS_HTTP_HEADER_BLOCK_MAIN;
+
+    } else if (has_response_pseudoheaders) {
+        /* Response header block. */
+
+        /* Determine whether this is an Informational (1xx) response */
+        struct aws_byte_cursor status_value =
+            aws_byte_cursor_from_string(current_block->pseudoheader_values[PSEUDOHEADER_STATUS]);
+        uint64_t status_code;
+        /* :status must be exactly 3 characters and parse as a number; anything else is malformed. */
+        if (status_value.len != 3 || aws_byte_cursor_utf8_parse_u64(status_value, &status_code)) {
+            DECODER_LOG(ERROR, decoder, ":status header has invalid value");
+            DECODER_LOGF(DEBUG, decoder, "Bad :status value is '" PRInSTR "'", AWS_BYTE_CURSOR_PRI(status_value));
+            goto malformed;
+        }
+
+        if (status_code / 100 == 1) {
+            current_block->block_type = AWS_HTTP_HEADER_BLOCK_INFORMATIONAL;
+
+            if (current_block->ends_stream) {
+                /* Informational headers do not constitute a full response (RFC-7540 8.1) */
+                DECODER_LOG(ERROR, decoder, "Informational (1xx) response cannot END_STREAM");
+                goto malformed;
+            }
+            current_block->body_headers_forbidden = true;
+        } else {
+            current_block->block_type = AWS_HTTP_HEADER_BLOCK_MAIN;
+        }
+        /**
+         * RFC-9110 8.6.
+         * A server MUST NOT send a Content-Length header field in any response with a status code of 1xx
+         * (Informational) or 204 (No Content).
+         */
+        current_block->body_headers_forbidden |= status_code == AWS_HTTP_STATUS_CODE_204_NO_CONTENT;
+
+    } else {
+        /* Trailing header block. */
+        if (!current_block->ends_stream) {
+            DECODER_LOG(ERROR, decoder, "HEADERS appear to be trailer, but lack END_STREAM");
+            goto malformed;
+        }
+
+        current_block->block_type = AWS_HTTP_HEADER_BLOCK_TRAILING;
+    }
+
+    /* #TODO RFC-7540 8.1.2.3 & 8.3 Validate request has correct pseudoheaders. Note different rules for CONNECT */
+    /* #TODO validate pseudoheader values. each one has its own special rules */
+
+    /* Finally, deliver header-fields via callback */
+    for (size_t i = 0; i < PSEUDOHEADER_COUNT; ++i) {
+        const struct aws_string *value_string = current_block->pseudoheader_values[i];
+        if (value_string) {
+
+            struct aws_http_header header_field = {
+                .name = *s_pseudoheader_name_to_cursor[i],
+                .value = aws_byte_cursor_from_string(value_string),
+                .compression = current_block->pseudoheader_compression[i],
+            };
+
+            enum aws_http_header_name name_enum = s_pseudoheader_to_header_name[i];
+
+            if (current_block->is_push_promise) {
+                DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_i, &header_field, name_enum);
+            } else {
+                DECODER_CALL_VTABLE_STREAM_ARGS(
+                    decoder, on_headers_i, &header_field, name_enum, current_block->block_type);
+            }
+        }
+    }
+
+    return AWS_H2ERR_SUCCESS;
+
+malformed:
+    /* A malformed header-block is not a connection error, it's a Stream Error (RFC-7540 5.4.2).
+     * We continue decoding and report that it's malformed in on_headers_end(). */
+    current_block->malformed = true;
+    return AWS_H2ERR_SUCCESS;
+already_malformed:
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Process single header-field.
+ * If it's invalid, mark the header-block as malformed.
+ * If it's valid, and header-block is not malformed, deliver via callback. */
+static struct aws_h2err s_process_header_field(
+    struct aws_h2_decoder *decoder,
+    const struct aws_http_header *header_field) {
+
+    struct aws_header_block_in_progress *current_block = &decoder->header_block_in_progress;
+    if (current_block->malformed) {
+        goto already_malformed;
+    }
+
+    const struct aws_byte_cursor name = header_field->name;
+    if (name.len == 0) {
+        DECODER_LOG(ERROR, decoder, "Header name is blank");
+        goto malformed;
+    }
+
+    enum aws_http_header_name name_enum = aws_http_lowercase_str_to_header_name(name);
+
+    /* A leading ':' marks a pseudo-header (safe to index: len != 0 was checked above). */
+    bool is_pseudoheader = name.ptr[0] == ':';
+    if (is_pseudoheader) {
+        if (current_block->pseudoheaders_done) {
+            /* Note: being careful not to leak possibly sensitive data except at DEBUG level and lower */
+            DECODER_LOG(ERROR, decoder, "Pseudo-headers must appear before regular fields.");
+            DECODER_LOGF(DEBUG, decoder, "Misplaced pseudo-header is '" PRInSTR "'", AWS_BYTE_CURSOR_PRI(name));
+            goto malformed;
+        }
+
+        enum pseudoheader_name pseudoheader_enum = s_header_to_pseudoheader_name(name_enum);
+        if (pseudoheader_enum == PSEUDOHEADER_UNKNOWN) {
+            DECODER_LOG(ERROR, decoder, "Unrecognized pseudo-header");
+            DECODER_LOGF(DEBUG, decoder, "Unrecognized pseudo-header is '" PRInSTR "'", AWS_BYTE_CURSOR_PRI(name));
+            goto malformed;
+        }
+
+        /* Ensure request pseudo-headers vs response pseudoheaders were sent appropriately.
+         * This also ensures that request and response pseudoheaders aren't being mixed. */
+        bool expect_request_pseudoheader = decoder->is_server || current_block->is_push_promise;
+        bool is_request_pseudoheader = pseudoheader_enum != PSEUDOHEADER_STATUS;
+        if (expect_request_pseudoheader != is_request_pseudoheader) {
+            DECODER_LOGF(
+                ERROR, /* ok to log name of recognized pseudo-header at ERROR level */
+                decoder,
+                "'" PRInSTR "' pseudo-header cannot be in %s header-block to %s",
+                AWS_BYTE_CURSOR_PRI(name),
+                current_block->is_push_promise ? "PUSH_PROMISE" : "HEADERS",
+                decoder->is_server ? "server" : "client");
+            goto malformed;
+        }
+
+        /* Protect against duplicates. */
+        if (current_block->pseudoheader_values[pseudoheader_enum] != NULL) {
+            /* ok to log name of recognized pseudo-header at ERROR level */
+            DECODER_LOGF(
+                ERROR, decoder, "'" PRInSTR "' pseudo-header occurred multiple times", AWS_BYTE_CURSOR_PRI(name));
+            goto malformed;
+        }
+
+        /* Buffer up pseudo-headers, we'll deliver them later once they're all validated. */
+        current_block->pseudoheader_compression[pseudoheader_enum] = header_field->compression;
+        current_block->pseudoheader_values[pseudoheader_enum] =
+            aws_string_new_from_cursor(decoder->alloc, &header_field->value);
+        if (!current_block->pseudoheader_values[pseudoheader_enum]) {
+            return aws_h2err_from_last_error();
+        }
+
+    } else { /* Else regular header-field. */
+
+        /* Regular header-fields come after pseudo-headers, so make sure pseudo-headers are flushed */
+        if (!current_block->pseudoheaders_done) {
+            struct aws_h2err err = s_flush_pseudoheaders(decoder);
+            if (aws_h2err_failed(err)) {
+                return err;
+            }
+
+            /* might have realized that header-block is malformed during flush */
+            if (current_block->malformed) {
+                goto already_malformed;
+            }
+        }
+
+        /* Validate header name (not necessary if string already matched against a known enum) */
+        if (name_enum == AWS_HTTP_HEADER_UNKNOWN) {
+            if (!aws_strutil_is_lowercase_http_token(name)) {
+                DECODER_LOG(ERROR, decoder, "Header name contains invalid characters");
+                DECODER_LOGF(DEBUG, decoder, "Bad header name is '" PRInSTR "'", AWS_BYTE_CURSOR_PRI(name));
+                goto malformed;
+            }
+        }
+
+        /* #TODO Validate characters used in header_field->value */
+
+        switch (name_enum) {
+            case AWS_HTTP_HEADER_COOKIE:
+                /* for a header cookie, we will not fire callback until we concatenate them all, let's store it at the
+                 * buffer */
+                /* Track the strongest (largest) compression hint seen across all cookie fragments. */
+                if (header_field->compression > current_block->cookie_header_compression_type) {
+                    current_block->cookie_header_compression_type = header_field->compression;
+                }
+
+                if (current_block->cookies.len) {
+                    /* add a delimiter */
+                    struct aws_byte_cursor delimiter = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("; ");
+                    if (aws_byte_buf_append_dynamic(&current_block->cookies, &delimiter)) {
+                        return aws_h2err_from_last_error();
+                    }
+                }
+                if (aws_byte_buf_append_dynamic(&current_block->cookies, &header_field->value)) {
+                    return aws_h2err_from_last_error();
+                }
+                /* Early return */
+                return AWS_H2ERR_SUCCESS;
+            case AWS_HTTP_HEADER_TRANSFER_ENCODING:
+            case AWS_HTTP_HEADER_UPGRADE:
+            case AWS_HTTP_HEADER_KEEP_ALIVE:
+            case AWS_HTTP_HEADER_PROXY_CONNECTION: {
+                /* connection-specific header field are treated as malformed (RFC9113 8.2.2) */
+                DECODER_LOGF(
+                    ERROR,
+                    decoder,
+                    "Connection-specific header ('" PRInSTR "') found, not allowed in HTTP/2",
+                    AWS_BYTE_CURSOR_PRI(name));
+                goto malformed;
+            } break;
+
+            case AWS_HTTP_HEADER_CONTENT_LENGTH:
+                if (current_block->body_headers_forbidden) {
+                    /* The content-length are forbidden */
+                    DECODER_LOG(ERROR, decoder, "Unexpected Content-Length header found");
+                    goto malformed;
+                }
+                break;
+            default:
+                break;
+        }
+        /* Deliver header-field via callback */
+        if (current_block->is_push_promise) {
+            DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_i, header_field, name_enum);
+        } else {
+            DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_headers_i, header_field, name_enum, current_block->block_type);
+        }
+    }
+
+    return AWS_H2ERR_SUCCESS;
+
+malformed:
+    /* A malformed header-block is not a connection error, it's a Stream Error (RFC-7540 5.4.2).
+     * We continue decoding and report that it's malformed in on_headers_end(). */
+    current_block->malformed = true;
+    return AWS_H2ERR_SUCCESS;
+already_malformed:
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Deliver the single concatenated "cookie" header, built up from the individual
+ * cookie fields that s_process_header_field() buffered and joined with "; ". */
+static struct aws_h2err s_flush_cookie_header(struct aws_h2_decoder *decoder) {
+    struct aws_header_block_in_progress *current_block = &decoder->header_block_in_progress;
+    if (current_block->malformed) {
+        return AWS_H2ERR_SUCCESS;
+    }
+    if (current_block->cookies.len == 0) {
+        /* Nothing to flush */
+        return AWS_H2ERR_SUCCESS;
+    }
+    struct aws_http_header concatenated_cookie;
+    struct aws_byte_cursor header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("cookie");
+    concatenated_cookie.name = header_name;
+    concatenated_cookie.value = aws_byte_cursor_from_buf(&current_block->cookies);
+    concatenated_cookie.compression = current_block->cookie_header_compression_type;
+    if (current_block->is_push_promise) {
+        DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_i, &concatenated_cookie, AWS_HTTP_HEADER_COOKIE);
+    } else {
+        DECODER_CALL_VTABLE_STREAM_ARGS(
+            decoder, on_headers_i, &concatenated_cookie, AWS_HTTP_HEADER_COOKIE, current_block->block_type);
+    }
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* This state checks whether we've consumed the current frame's entire header-block fragment.
+ * We revisit this state after each entry is decoded.
+ * This state consumes no data. */
+static struct aws_h2err s_state_fn_header_block_loop(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+    (void)input;
+
+    /* If we're out of payload data, handle frame complete */
+    if (decoder->frame_in_progress.payload_len == 0) {
+
+        /* If this is the end of the header-block, invoke callback and clear header_block_in_progress */
+        if (decoder->frame_in_progress.flags.end_headers) {
+            /* Ensure pseudo-headers have been flushed */
+            struct aws_h2err err = s_flush_pseudoheaders(decoder);
+            if (aws_h2err_failed(err)) {
+                return err;
+            }
+            /* flush the concatenated cookie header */
+            err = s_flush_cookie_header(decoder);
+            if (aws_h2err_failed(err)) {
+                return err;
+            }
+
+            bool malformed = decoder->header_block_in_progress.malformed;
+            DECODER_LOGF(TRACE, decoder, "Done decoding header-block, malformed=%d", malformed);
+
+            if (decoder->header_block_in_progress.is_push_promise) {
+                DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_end, malformed);
+            } else {
+                DECODER_CALL_VTABLE_STREAM_ARGS(
+                    decoder, on_headers_end, malformed, decoder->header_block_in_progress.block_type);
+            }
+
+            /* If header-block began with END_STREAM flag, alert user now */
+            if (decoder->header_block_in_progress.ends_stream) {
+                DECODER_CALL_VTABLE_STREAM(decoder, on_end_stream);
+            }
+
+            s_reset_header_block_in_progress(decoder);
+
+        } else {
+            DECODER_LOG(TRACE, decoder, "Done decoding header-block fragment, expecting CONTINUATION frames");
+        }
+
+        /* Finish this frame */
+        /* Any trailing padding still belongs to this frame; the padding state consumes it. */
+        return s_decoder_switch_state(decoder, &s_state_padding);
+    }
+
+    DECODER_LOGF(
+        TRACE,
+        decoder,
+        "Decoding header-block entry, %" PRIu32 " bytes remaining in payload",
+        decoder->frame_in_progress.payload_len);
+
+    return s_decoder_switch_state(decoder, &s_state_header_block_entry);
+}
+
+/* We stay in this state until a single "entry" is decoded from the header-block fragment.
+ * Then we return to the header_block_loop state */
+static struct aws_h2err s_state_fn_header_block_entry(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+    /* This state requires at least 1 byte, but will likely consume more */
+    AWS_ASSERT(input->len >= s_state_header_block_entry_requires_1_bytes);
+
+    /* Feed header-block fragment to HPACK decoder.
+     * Don't let decoder consume anything beyond payload_len. */
+    struct aws_byte_cursor fragment = *input;
+    if (fragment.len > decoder->frame_in_progress.payload_len) {
+        fragment.len = decoder->frame_in_progress.payload_len;
+    }
+
+    const size_t prev_fragment_len = fragment.len;
+
+    struct aws_hpack_decode_result result;
+    if (aws_hpack_decode(&decoder->hpack, &fragment, &result)) {
+        DECODER_LOGF(ERROR, decoder, "Error decoding header-block fragment: %s", aws_error_name(aws_last_error()));
+
+        /* Any possible error from HPACK decoder (except OOM) is treated as a COMPRESSION error. */
+        if (aws_last_error() == AWS_ERROR_OOM) {
+            return aws_h2err_from_last_error();
+        } else {
+            return aws_h2err_from_h2_code(AWS_HTTP2_ERR_COMPRESSION_ERROR);
+        }
+    }
+
+    /* HPACK decoder returns when it reaches the end of an entry, or when it's consumed the whole fragment.
+     * Update input & payload_len to reflect the number of bytes consumed. */
+    const size_t bytes_consumed = prev_fragment_len - fragment.len;
+    aws_byte_cursor_advance(input, bytes_consumed);
+    decoder->frame_in_progress.payload_len -= (uint32_t)bytes_consumed;
+
+    if (result.type == AWS_HPACK_DECODE_T_ONGOING) {
+        /* HPACK decoder hasn't finished entry */
+
+        if (decoder->frame_in_progress.payload_len > 0) {
+            /* More payload is coming. Remain in state until it arrives */
+            DECODER_LOG(TRACE, decoder, "Header-block entry partially decoded, waiting for more data.");
+            return AWS_H2ERR_SUCCESS;
+        }
+
+        if (decoder->frame_in_progress.flags.end_headers) {
+            /* Reached end of the frame's payload, and this frame ends the header-block.
+             * Error if we ended up with a partially decoded entry. */
+            DECODER_LOG(ERROR, decoder, "Compression error: incomplete entry at end of header-block");
+            return aws_h2err_from_h2_code(AWS_HTTP2_ERR_COMPRESSION_ERROR);
+        }
+
+        /* Reached end of this frame's payload, but CONTINUATION frames are expected to arrive.
+         * We'll resume decoding this entry when we get them. */
+        DECODER_LOG(TRACE, decoder, "Header-block entry partially decoded, resumes in CONTINUATION frame");
+        return s_decoder_switch_state(decoder, &s_state_header_block_loop);
+    }
+
+    /* Finished decoding HPACK entry! */
+
+    /* #TODO Enforces dynamic table resize rules from RFC-7541 4.2
+     * If dynamic table size changed via SETTINGS frame, next header-block must start with DYNAMIC_TABLE_RESIZE entry.
+     * Is it illegal to receive a resize entry at other times? */
+
+    /* #TODO The TE header field ... MUST NOT contain any value other than "trailers" */
+
+    /* Non-header-field results (e.g. dynamic table resize) need no further handling here. */
+    if (result.type == AWS_HPACK_DECODE_T_HEADER_FIELD) {
+        const struct aws_http_header *header_field = &result.data.header_field;
+
+        DECODER_LOGF(
+            TRACE,
+            decoder,
+            "Decoded header field: \"" PRInSTR ": " PRInSTR "\"",
+            AWS_BYTE_CURSOR_PRI(header_field->name),
+            AWS_BYTE_CURSOR_PRI(header_field->value));
+
+        struct aws_h2err err = s_process_header_field(decoder, header_field);
+        if (aws_h2err_failed(err)) {
+            return err;
+        }
+    }
+
+    return s_decoder_switch_state(decoder, &s_state_header_block_loop);
+}
+
+/* The first thing a client sends on a connection is a 24 byte magic string (RFC-7540 3.5).
+ * Note that this state doesn't "require" the full 24 bytes, it runs as data arrives.
+ * This avoids hanging if < 24 bytes rolled in. */
+static struct aws_h2err s_state_fn_connection_preface_string(
+    struct aws_h2_decoder *decoder,
+    struct aws_byte_cursor *input) {
+    /* Compare only as many bytes as both the expected preface remainder and input provide. */
+    size_t remaining_len = decoder->connection_preface_cursor.len;
+    size_t consuming_len = input->len < remaining_len ? input->len : remaining_len;
+
+    struct aws_byte_cursor expected = aws_byte_cursor_advance(&decoder->connection_preface_cursor, consuming_len);
+
+    struct aws_byte_cursor received = aws_byte_cursor_advance(input, consuming_len);
+
+    if (!aws_byte_cursor_eq(&expected, &received)) {
+        DECODER_LOG(ERROR, decoder, "Client connection preface is invalid");
+        return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+    }
+
+    if (decoder->connection_preface_cursor.len == 0) {
+        /* Done receiving connection preface string, proceed to decoding normal frames. */
+        return s_decoder_reset_state(decoder);
+    }
+
+    /* Remain in state until more data arrives */
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Apply a peer-acknowledged SETTINGS_HEADER_TABLE_SIZE to the HPACK decoder. */
+void aws_h2_decoder_set_setting_header_table_size(struct aws_h2_decoder *decoder, uint32_t data) {
+    /* Set the protocol_max_size_setting for hpack. */
+    aws_hpack_decoder_update_max_table_size(&decoder->hpack, data);
+}
+
+/* Record the SETTINGS_ENABLE_PUSH value the decoder should enforce. */
+void aws_h2_decoder_set_setting_enable_push(struct aws_h2_decoder *decoder, uint32_t data) {
+    decoder->settings.enable_push = data;
+}
+
+/* Record the SETTINGS_MAX_FRAME_SIZE value the decoder should enforce. */
+void aws_h2_decoder_set_setting_max_frame_size(struct aws_h2_decoder *decoder, uint32_t data) {
+    decoder->settings.max_frame_size = data;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/h2_frames.c b/contrib/restricted/aws/aws-c-http/source/h2_frames.c
new file mode 100644
index 00000000000..12b5ca0849c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h2_frames.c
@@ -0,0 +1,1233 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/h2_frames.h>
+
+#include <aws/compression/huffman.h>
+
+#include <aws/common/logging.h>
+
+#include <aws/io/stream.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+#define ENCODER_LOGF(level, encoder, text, ...) \
+ AWS_LOGF_##level(AWS_LS_HTTP_ENCODER, "id=%p " text, (encoder)->logging_id, __VA_ARGS__)
+
+#define ENCODER_LOG(level, encoder, text) ENCODER_LOGF(level, encoder, "%s", text)
+
+const struct aws_byte_cursor aws_h2_connection_preface_client_string =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n");
+
+/* Initial values and bounds are from RFC-7540 6.5.2 */
+/* Default value for each HTTP/2 setting before any SETTINGS frame is applied. */
+const uint32_t aws_h2_settings_initial[AWS_HTTP2_SETTINGS_END_RANGE] = {
+    [AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE] = 4096,
+    [AWS_HTTP2_SETTINGS_ENABLE_PUSH] = 1,
+    [AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS] = UINT32_MAX, /* "Initially there is no limit to this value" */
+    [AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE] = AWS_H2_INIT_WINDOW_SIZE,
+    [AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE] = 16384,
+    [AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE] = UINT32_MAX, /* "The initial value of this setting is unlimited" */
+};
+
+/* Per-setting [min, max] values a peer is allowed to send (RFC-7540 6.5.2). */
+const uint32_t aws_h2_settings_bounds[AWS_HTTP2_SETTINGS_END_RANGE][2] = {
+    [AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE][0] = 0,
+    [AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE][1] = UINT32_MAX,
+
+    [AWS_HTTP2_SETTINGS_ENABLE_PUSH][0] = 0,
+    [AWS_HTTP2_SETTINGS_ENABLE_PUSH][1] = 1,
+
+    [AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS][0] = 0,
+    [AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS][1] = UINT32_MAX,
+
+    [AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE][0] = 0,
+    [AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE][1] = AWS_H2_WINDOW_UPDATE_MAX,
+
+    [AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE][0] = 16384,
+    [AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE][1] = AWS_H2_PAYLOAD_MAX,
+
+    [AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE][0] = 0,
+    [AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE][1] = UINT32_MAX,
+};
+
+/* Stream ids & dependencies should only write the bottom 31 bits */
+static const uint32_t s_u32_top_bit_mask = UINT32_MAX << 31;
+
+/* Bytes to initially reserve for encoding of an entire header block. Buffer will grow if necessary. */
+static const size_t s_encoded_header_block_reserve = 128; /* Value pulled from thin air */
+
+#define DEFINE_FRAME_VTABLE(NAME) \
+ static aws_h2_frame_destroy_fn s_frame_##NAME##_destroy; \
+ static aws_h2_frame_encode_fn s_frame_##NAME##_encode; \
+ static const struct aws_h2_frame_vtable s_frame_##NAME##_vtable = { \
+ .destroy = s_frame_##NAME##_destroy, \
+ .encode = s_frame_##NAME##_encode, \
+ }
+
+/* Human-readable frame-type name for logging; never returns NULL. */
+const char *aws_h2_frame_type_to_str(enum aws_h2_frame_type type) {
+    switch (type) {
+        case AWS_H2_FRAME_T_DATA:
+            return "DATA";
+        case AWS_H2_FRAME_T_HEADERS:
+            return "HEADERS";
+        case AWS_H2_FRAME_T_PRIORITY:
+            return "PRIORITY";
+        case AWS_H2_FRAME_T_RST_STREAM:
+            return "RST_STREAM";
+        case AWS_H2_FRAME_T_SETTINGS:
+            return "SETTINGS";
+        case AWS_H2_FRAME_T_PUSH_PROMISE:
+            return "PUSH_PROMISE";
+        case AWS_H2_FRAME_T_PING:
+            return "PING";
+        case AWS_H2_FRAME_T_GOAWAY:
+            return "GOAWAY";
+        case AWS_H2_FRAME_T_WINDOW_UPDATE:
+            return "WINDOW_UPDATE";
+        case AWS_H2_FRAME_T_CONTINUATION:
+            return "CONTINUATION";
+        default:
+            return "**UNKNOWN**";
+    }
+}
+
+/* Human-readable name for an RFC-7540 7 error code; never returns NULL. */
+const char *aws_http2_error_code_to_str(enum aws_http2_error_code h2_error_code) {
+    switch (h2_error_code) {
+        case AWS_HTTP2_ERR_NO_ERROR:
+            return "NO_ERROR";
+        case AWS_HTTP2_ERR_PROTOCOL_ERROR:
+            return "PROTOCOL_ERROR";
+        case AWS_HTTP2_ERR_INTERNAL_ERROR:
+            return "INTERNAL_ERROR";
+        case AWS_HTTP2_ERR_FLOW_CONTROL_ERROR:
+            return "FLOW_CONTROL_ERROR";
+        case AWS_HTTP2_ERR_SETTINGS_TIMEOUT:
+            return "SETTINGS_TIMEOUT";
+        case AWS_HTTP2_ERR_STREAM_CLOSED:
+            return "STREAM_CLOSED";
+        case AWS_HTTP2_ERR_FRAME_SIZE_ERROR:
+            return "FRAME_SIZE_ERROR";
+        case AWS_HTTP2_ERR_REFUSED_STREAM:
+            return "REFUSED_STREAM";
+        case AWS_HTTP2_ERR_CANCEL:
+            return "CANCEL";
+        case AWS_HTTP2_ERR_COMPRESSION_ERROR:
+            return "COMPRESSION_ERROR";
+        case AWS_HTTP2_ERR_CONNECT_ERROR:
+            return "CONNECT_ERROR";
+        case AWS_HTTP2_ERR_ENHANCE_YOUR_CALM:
+            return "ENHANCE_YOUR_CALM";
+        case AWS_HTTP2_ERR_INADEQUATE_SECURITY:
+            return "INADEQUATE_SECURITY";
+        case AWS_HTTP2_ERR_HTTP_1_1_REQUIRED:
+            return "HTTP_1_1_REQUIRED";
+        default:
+            return "UNKNOWN_ERROR";
+    }
+}
+
+/* Build an aws_h2err from an HTTP/2 error code; the aws-level code is fixed
+ * to AWS_ERROR_HTTP_PROTOCOL_ERROR. */
+struct aws_h2err aws_h2err_from_h2_code(enum aws_http2_error_code h2_error_code) {
+    AWS_PRECONDITION(h2_error_code > AWS_HTTP2_ERR_NO_ERROR && h2_error_code < AWS_HTTP2_ERR_COUNT);
+
+    return (struct aws_h2err){
+        .h2_code = h2_error_code,
+        .aws_code = AWS_ERROR_HTTP_PROTOCOL_ERROR,
+    };
+}
+
+/* Build an aws_h2err from an aws error code; the h2-level code is fixed
+ * to INTERNAL_ERROR since the failure did not originate from the peer. */
+struct aws_h2err aws_h2err_from_aws_code(int aws_error_code) {
+    AWS_PRECONDITION(aws_error_code != 0);
+
+    return (struct aws_h2err){
+        .h2_code = AWS_HTTP2_ERR_INTERNAL_ERROR,
+        .aws_code = aws_error_code,
+    };
+}
+
+/* Convenience wrapper: build an aws_h2err from the thread's last aws error. */
+struct aws_h2err aws_h2err_from_last_error(void) {
+    return aws_h2err_from_aws_code(aws_last_error());
+}
+
+/* True only when both the h2-level and aws-level codes are zero. */
+bool aws_h2err_success(struct aws_h2err err) {
+    return err.h2_code == 0 && err.aws_code == 0;
+}
+
+/* True when either the h2-level or aws-level code is set (inverse of aws_h2err_success). */
+bool aws_h2err_failed(struct aws_h2err err) {
+    return err.h2_code != 0 || err.aws_code != 0;
+}
+
+/* Reject stream id 0 (reserved for the connection) and ids above the 31-bit max. */
+int aws_h2_validate_stream_id(uint32_t stream_id) {
+    if (stream_id == 0 || stream_id > AWS_H2_STREAM_ID_MAX) {
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+    return AWS_OP_SUCCESS;
+}
+
+/**
+ * Determine max frame payload length that will:
+ * 1) fit in output's available space
+ * 2) obey encoders current MAX_FRAME_SIZE
+ *
+ * Assumes no part of the frame has been written yet to output.
+ * The total length of the frame would be: returned-payload-len + AWS_H2_FRAME_PREFIX_SIZE
+ *
+ * Raises error if there is not enough space available for even a frame prefix.
+ */
+static int s_get_max_contiguous_payload_length(
+    const struct aws_h2_frame_encoder *encoder,
+    const struct aws_byte_buf *output,
+    size_t *max_payload_length) {
+
+    const size_t space_available = output->capacity - output->len;
+
+    size_t max_payload_given_space_available;
+    /* Checked subtraction: fails (raises SHORT_BUFFER) if there isn't even room for a frame prefix. */
+    if (aws_sub_size_checked(space_available, AWS_H2_FRAME_PREFIX_SIZE, &max_payload_given_space_available)) {
+        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+    }
+
+    size_t max_payload_given_settings = encoder->settings.max_frame_size;
+
+    /* Result is the tighter of the two limits. */
+    *max_payload_length = aws_min_size(max_payload_given_space_available, max_payload_given_settings);
+    return AWS_OP_SUCCESS;
+}
+
+/***********************************************************************************************************************
+ * Priority
+ **********************************************************************************************************************/
+static size_t s_frame_priority_settings_size = 5;
+
+static void s_frame_priority_settings_encode(
+    const struct aws_h2_frame_priority_settings *priority,
+    struct aws_byte_buf *output) {
+    AWS_PRECONDITION(priority);
+    AWS_PRECONDITION(output);
+    AWS_PRECONDITION((priority->stream_dependency & s_u32_top_bit_mask) == 0);
+    (void)s_u32_top_bit_mask;
+
+    /* PRIORITY is encoded as (RFC-7540 6.3):
+     * +-+-------------------------------------------------------------+
+     * |E| Stream Dependency (31) |
+     * +-+-------------+-----------------------------------------------+
+     * | Weight (8) |
+     * +-+-------------+
+     */
+    bool writes_ok = true;
+
+    /* Write the top 4 bytes */
+    /* E (exclusive) flag occupies the top bit; precondition above guarantees no overlap. */
+    uint32_t top_bytes = priority->stream_dependency | ((uint32_t)priority->stream_dependency_exclusive << 31);
+    writes_ok &= aws_byte_buf_write_be32(output, top_bytes);
+
+    /* Write the priority weight */
+    writes_ok &= aws_byte_buf_write_u8(output, priority->weight);
+
+    /* Caller must have reserved space; writes cannot fail here. */
+    AWS_ASSERT(writes_ok);
+    (void)writes_ok;
+}
+
+/***********************************************************************************************************************
+ * Common Frame Prefix
+ **********************************************************************************************************************/
+/* Initialize the fields common to every frame type (vtable, allocator, type, stream id). */
+static void s_init_frame_base(
+    struct aws_h2_frame *frame_base,
+    struct aws_allocator *alloc,
+    enum aws_h2_frame_type type,
+    const struct aws_h2_frame_vtable *vtable,
+    uint32_t stream_id) {
+
+    frame_base->vtable = vtable;
+    frame_base->alloc = alloc;
+    frame_base->type = type;
+    frame_base->stream_id = stream_id;
+}
+
+/* Write the 9-byte prefix common to all frames: 24-bit length, type, flags, stream id. */
+static void s_frame_prefix_encode(
+    enum aws_h2_frame_type type,
+    uint32_t stream_id,
+    size_t length,
+    uint8_t flags,
+    struct aws_byte_buf *output) {
+    AWS_PRECONDITION(output);
+    AWS_PRECONDITION(!(stream_id & s_u32_top_bit_mask), "Invalid stream ID");
+    AWS_PRECONDITION(length <= AWS_H2_PAYLOAD_MAX);
+
+    /* Frame prefix is encoded like this (RFC-7540 4.1):
+     * +-----------------------------------------------+
+     * | Length (24) |
+     * +---------------+---------------+---------------+
+     * | Type (8) | Flags (8) |
+     * +-+-------------+---------------+-------------------------------+
+     * |R| Stream Identifier (31) |
+     * +=+=============================================================+
+     */
+    bool writes_ok = true;
+
+    /* Write length */
+    writes_ok &= aws_byte_buf_write_be24(output, (uint32_t)length);
+
+    /* Write type */
+    writes_ok &= aws_byte_buf_write_u8(output, type);
+
+    /* Write flags */
+    writes_ok &= aws_byte_buf_write_u8(output, flags);
+
+    /* Write stream id (with reserved first bit) */
+    writes_ok &= aws_byte_buf_write_be32(output, stream_id);
+
+    /* Caller must have reserved space; writes cannot fail here. */
+    AWS_ASSERT(writes_ok);
+    (void)writes_ok;
+}
+
+/***********************************************************************************************************************
+ * Encoder
+ **********************************************************************************************************************/
+/* Initialize a frame encoder in place. logging_id is stored for log correlation only.
+ * Always succeeds (returns AWS_OP_SUCCESS). */
+int aws_h2_frame_encoder_init(
+    struct aws_h2_frame_encoder *encoder,
+    struct aws_allocator *allocator,
+    const void *logging_id) {
+
+    AWS_PRECONDITION(encoder);
+    AWS_PRECONDITION(allocator);
+
+    AWS_ZERO_STRUCT(*encoder);
+    encoder->allocator = allocator;
+    encoder->logging_id = logging_id;
+
+    aws_hpack_encoder_init(&encoder->hpack, allocator, logging_id);
+
+    /* Until the peer's SETTINGS arrive, use the RFC-defined initial max frame size. */
+    encoder->settings.max_frame_size = aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE];
+    return AWS_OP_SUCCESS;
+}
+/* Release resources owned by the encoder (currently just the HPACK encoder state). */
+void aws_h2_frame_encoder_clean_up(struct aws_h2_frame_encoder *encoder) {
+    AWS_PRECONDITION(encoder);
+
+    aws_hpack_encoder_clean_up(&encoder->hpack);
+}
+
+/***********************************************************************************************************************
+ * DATA
+ **********************************************************************************************************************/
+int aws_h2_encode_data_frame(
+ struct aws_h2_frame_encoder *encoder,
+ uint32_t stream_id,
+ struct aws_input_stream *body_stream,
+ bool body_ends_stream,
+ uint8_t pad_length,
+ int32_t *stream_window_size_peer,
+ size_t *connection_window_size_peer,
+ struct aws_byte_buf *output,
+ bool *body_complete,
+ bool *body_stalled) {
+
+ AWS_PRECONDITION(encoder);
+ AWS_PRECONDITION(body_stream);
+ AWS_PRECONDITION(output);
+ AWS_PRECONDITION(body_complete);
+ AWS_PRECONDITION(body_stalled);
+ AWS_PRECONDITION(*stream_window_size_peer > 0);
+
+ if (aws_h2_validate_stream_id(stream_id)) {
+ return AWS_OP_ERR;
+ }
+
+ *body_complete = false;
+ *body_stalled = false;
+ uint8_t flags = 0;
+
+ /*
+ * Payload-length is the first thing encoded in a frame, but we don't know how
+ * much data we'll get from the body-stream until we actually read it.
+ * Therefore, we determine the exact location that the body data should go,
+ * then stream the body directly into that part of the output buffer.
+ * Then we will go and write the other parts of the frame in around it.
+ */
+
+ size_t bytes_preceding_body = AWS_H2_FRAME_PREFIX_SIZE;
+ size_t payload_overhead = 0; /* Amount of "payload" that will not contain body (padding) */
+ if (pad_length > 0) {
+ flags |= AWS_H2_FRAME_F_PADDED;
+
+ /* Padding len is 1st byte of payload (padding itself goes at end of payload) */
+ bytes_preceding_body += 1;
+ payload_overhead = 1 + pad_length;
+ }
+
+ /* Max amount allowed by stream and connection flow-control window */
+ size_t min_window_size = aws_min_size(*stream_window_size_peer, *connection_window_size_peer);
+
+ /* Max amount of payload we can do right now */
+ size_t max_payload;
+ if (s_get_max_contiguous_payload_length(encoder, output, &max_payload)) {
+ goto handle_waiting_for_more_space;
+ }
+ /* The flow-control window will limit the size for max_payload of a flow-controlled frame */
+ max_payload = aws_min_size(max_payload, min_window_size);
+ /* Max amount of body we can fit in the payload*/
+ size_t max_body;
+ if (aws_sub_size_checked(max_payload, payload_overhead, &max_body) || max_body == 0) {
+ goto handle_waiting_for_more_space;
+ }
+
+ /* Use a sub-buffer to limit where body can go */
+ struct aws_byte_buf body_sub_buf =
+ aws_byte_buf_from_empty_array(output->buffer + output->len + bytes_preceding_body, max_body);
+
+ /* Read body into sub-buffer */
+ if (aws_input_stream_read(body_stream, &body_sub_buf)) {
+ goto error;
+ }
+
+ /* Check if we've reached the end of the body */
+ struct aws_stream_status body_status;
+ if (aws_input_stream_get_status(body_stream, &body_status)) {
+ goto error;
+ }
+
+ if (body_status.is_end_of_stream) {
+ *body_complete = true;
+ if (body_ends_stream) {
+ flags |= AWS_H2_FRAME_F_END_STREAM;
+ }
+ } else {
+ if (body_sub_buf.len < body_sub_buf.capacity) {
+ /* Body stream was unable to provide as much data as it could have */
+ *body_stalled = true;
+
+ if (body_sub_buf.len == 0) {
+ /* This frame would have no useful information, don't even bother sending it */
+ goto handle_nothing_to_send_right_now;
+ }
+ }
+ }
+
+ ENCODER_LOGF(
+ TRACE,
+ encoder,
+ "Encoding frame type=DATA stream_id=%" PRIu32 " data_len=%zu stalled=%d%s",
+ stream_id,
+ body_sub_buf.len,
+ *body_stalled,
+ (flags & AWS_H2_FRAME_F_END_STREAM) ? " END_STREAM" : "");
+
+ /*
+ * Write in the other parts of the frame.
+ */
+ bool writes_ok = true;
+
+ /* Write the frame prefix */
+ const size_t payload_len = body_sub_buf.len + payload_overhead;
+ s_frame_prefix_encode(AWS_H2_FRAME_T_DATA, stream_id, payload_len, flags, output);
+
+ /* Write pad length */
+ if (flags & AWS_H2_FRAME_F_PADDED) {
+ writes_ok &= aws_byte_buf_write_u8(output, pad_length);
+ }
+
+ /* Increment output->len to jump over the body that we already wrote in */
+ AWS_ASSERT(output->buffer + output->len == body_sub_buf.buffer && "Streamed DATA to wrong position");
+ output->len += body_sub_buf.len;
+
+ /* Write padding */
+ if (flags & AWS_H2_FRAME_F_PADDED) {
+ writes_ok &= aws_byte_buf_write_u8_n(output, 0, pad_length);
+ }
+
+ /* update the connection window size now, we will update stream window size when this function returns */
+ AWS_ASSERT(payload_len <= min_window_size);
+ *connection_window_size_peer -= payload_len;
+ *stream_window_size_peer -= (int32_t)payload_len;
+
+ AWS_ASSERT(writes_ok);
+ (void)writes_ok;
+ return AWS_OP_SUCCESS;
+
+handle_waiting_for_more_space:
+ ENCODER_LOGF(TRACE, encoder, "Insufficient space to encode DATA for stream %" PRIu32 " right now", stream_id);
+ return AWS_OP_SUCCESS;
+
+handle_nothing_to_send_right_now:
+ ENCODER_LOGF(INFO, encoder, "Stream %" PRIu32 " produced 0 bytes of body data", stream_id);
+ return AWS_OP_SUCCESS;
+
+error:
+ return AWS_OP_ERR;
+}
+
+/***********************************************************************************************************************
+ * HEADERS / PUSH_PROMISE
+ **********************************************************************************************************************/
+DEFINE_FRAME_VTABLE(headers);
+
+/* Represents a HEADERS or PUSH_PROMISE frame (followed by zero or more CONTINUATION frames) */
+struct aws_h2_frame_headers {
+    struct aws_h2_frame base;
+
+    /* Common data */
+    const struct aws_http_headers *headers; /* Reference acquired at creation, released in s_frame_headers_destroy() */
+    uint8_t pad_length; /* Set to 0 to disable AWS_H2_FRAME_F_PADDED */
+
+    /* HEADERS-only data */
+    bool end_stream; /* AWS_H2_FRAME_F_END_STREAM */
+    bool has_priority; /* AWS_H2_FRAME_F_PRIORITY */
+    struct aws_h2_frame_priority_settings priority;
+
+    /* PUSH_PROMISE-only data */
+    uint32_t promised_stream_id;
+
+    /* State. Values are ordered: encoding loops compare with '<' to test completion. */
+    enum {
+        AWS_H2_HEADERS_STATE_INIT,
+        AWS_H2_HEADERS_STATE_FIRST_FRAME, /* header-block pre-encoded, no frames written yet */
+        AWS_H2_HEADERS_STATE_CONTINUATION, /* first frame written, need to write CONTINUATION frames now */
+        AWS_H2_HEADERS_STATE_COMPLETE,
+    } state;
+
+    struct aws_byte_buf whole_encoded_header_block; /* Entire HPACK-encoded header-block, filled once in INIT state */
+    struct aws_byte_cursor header_block_cursor; /* tracks progress sending encoded header-block in fragments */
+};
+
+/* Shared constructor for HEADERS and PUSH_PROMISE frames.
+ * On success, acquires a reference on `headers` and returns the new frame's base.
+ * Returns NULL on invalid stream IDs or allocation failure.
+ * `end_stream`/`optional_priority` apply only to HEADERS; `promised_stream_id` only to PUSH_PROMISE. */
+static struct aws_h2_frame *s_frame_new_headers_or_push_promise(
+    struct aws_allocator *allocator,
+    enum aws_h2_frame_type frame_type,
+    uint32_t stream_id,
+    const struct aws_http_headers *headers,
+    uint8_t pad_length,
+    bool end_stream,
+    const struct aws_h2_frame_priority_settings *optional_priority,
+    uint32_t promised_stream_id) {
+
+    /* TODO: Host and ":authority" are no longer permitted to disagree. Should we enforce it here or send it as
+     * requested, let the server side reject the request? */
+    AWS_PRECONDITION(allocator);
+    AWS_PRECONDITION(frame_type == AWS_H2_FRAME_T_HEADERS || frame_type == AWS_H2_FRAME_T_PUSH_PROMISE);
+    AWS_PRECONDITION(headers);
+
+    /* Validate args */
+
+    if (aws_h2_validate_stream_id(stream_id)) {
+        return NULL;
+    }
+
+    if (frame_type == AWS_H2_FRAME_T_PUSH_PROMISE) {
+        if (aws_h2_validate_stream_id(promised_stream_id)) {
+            return NULL;
+        }
+    }
+
+    if (optional_priority && aws_h2_validate_stream_id(optional_priority->stream_dependency)) {
+        return NULL;
+    }
+
+    /* Create */
+
+    struct aws_h2_frame_headers *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_headers));
+    if (!frame) {
+        return NULL;
+    }
+
+    if (aws_byte_buf_init(&frame->whole_encoded_header_block, allocator, s_encoded_header_block_reserve)) {
+        goto error;
+    }
+
+    if (frame_type == AWS_H2_FRAME_T_HEADERS) {
+        frame->end_stream = end_stream;
+        if (optional_priority) {
+            frame->has_priority = true;
+            frame->priority = *optional_priority;
+        }
+    } else {
+        frame->promised_stream_id = promised_stream_id;
+    }
+
+    s_init_frame_base(&frame->base, allocator, frame_type, &s_frame_headers_vtable, stream_id);
+
+    aws_http_headers_acquire((struct aws_http_headers *)headers);
+    frame->headers = headers;
+    frame->pad_length = pad_length;
+
+    return &frame->base;
+
+error:
+    /* NOTE(review): on this path s_init_frame_base() has not run yet, so frame->base.alloc and frame->headers
+     * are still NULL (calloc). The destroy path thus calls aws_http_headers_release(NULL) and
+     * aws_mem_release(NULL, frame) -- confirm both tolerate NULL arguments. */
+    s_frame_headers_destroy(&frame->base);
+    return NULL;
+}
+
+/* Public constructor for a HEADERS frame (padding, END_STREAM, and optional PRIORITY supported).
+ * Thin wrapper over the shared HEADERS/PUSH_PROMISE constructor; returns NULL on failure. */
+struct aws_h2_frame *aws_h2_frame_new_headers(
+    struct aws_allocator *allocator,
+    uint32_t stream_id,
+    const struct aws_http_headers *headers,
+    bool end_stream,
+    uint8_t pad_length,
+    const struct aws_h2_frame_priority_settings *optional_priority) {
+
+    return s_frame_new_headers_or_push_promise(
+        allocator,
+        AWS_H2_FRAME_T_HEADERS,
+        stream_id,
+        headers,
+        pad_length,
+        end_stream,
+        optional_priority,
+        0 /* HEADERS doesn't have promised_stream_id */);
+}
+
+/* Public constructor for a PUSH_PROMISE frame carrying `promised_stream_id`.
+ * Thin wrapper over the shared HEADERS/PUSH_PROMISE constructor; returns NULL on failure. */
+struct aws_h2_frame *aws_h2_frame_new_push_promise(
+    struct aws_allocator *allocator,
+    uint32_t stream_id,
+    uint32_t promised_stream_id,
+    const struct aws_http_headers *headers,
+    uint8_t pad_length) {
+
+    return s_frame_new_headers_or_push_promise(
+        allocator,
+        AWS_H2_FRAME_T_PUSH_PROMISE,
+        stream_id,
+        headers,
+        pad_length,
+        false /* PUSH_PROMISE doesn't have end_stream flag */,
+        NULL /* PUSH_PROMISE doesn't have priority_settings */,
+        promised_stream_id);
+}
+
+/* vtable destroy() for HEADERS/PUSH_PROMISE frames:
+ * releases the headers reference, frees the pre-encoded header-block buffer, then the frame itself. */
+static void s_frame_headers_destroy(struct aws_h2_frame *frame_base) {
+    struct aws_h2_frame_headers *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_headers, base);
+    aws_http_headers_release((struct aws_http_headers *)frame->headers);
+    aws_byte_buf_clean_up(&frame->whole_encoded_header_block);
+    aws_mem_release(frame->base.alloc, frame);
+}
+
+/* Encode the next frame for this header-block (or encode nothing if output buffer is too small).
+ * Sets *waiting_for_more_space: true means nothing was written and the caller should retry with more room;
+ * false means one whole frame was written and frame->state was advanced (COMPLETE or CONTINUATION). */
+static void s_encode_single_header_block_frame(
+    struct aws_h2_frame_headers *frame,
+    struct aws_h2_frame_encoder *encoder,
+    struct aws_byte_buf *output,
+    bool *waiting_for_more_space) {
+
+    /*
+     * Figure out the details of the next frame to encode.
+     * The first frame will be either HEADERS or PUSH_PROMISE.
+     * All subsequent frames will be CONTINUATION
+     */
+
+    enum aws_h2_frame_type frame_type;
+    uint8_t flags = 0;
+    uint8_t pad_length = 0;
+    const struct aws_h2_frame_priority_settings *priority_settings = NULL;
+    const uint32_t *promised_stream_id = NULL;
+    size_t payload_overhead = 0; /* Amount of payload holding things other than header-block (padding, etc) */
+
+    if (frame->state == AWS_H2_HEADERS_STATE_FIRST_FRAME) {
+        frame_type = frame->base.type;
+
+        if (frame->pad_length > 0) {
+            flags |= AWS_H2_FRAME_F_PADDED;
+            pad_length = frame->pad_length;
+            payload_overhead += 1 + pad_length; /* 1 byte Pad Length field + the padding itself */
+        }
+
+        if (frame->has_priority) {
+            priority_settings = &frame->priority;
+            flags |= AWS_H2_FRAME_F_PRIORITY;
+            payload_overhead += s_frame_priority_settings_size;
+        }
+
+        if (frame->end_stream) {
+            flags |= AWS_H2_FRAME_F_END_STREAM;
+        }
+
+        if (frame_type == AWS_H2_FRAME_T_PUSH_PROMISE) {
+            promised_stream_id = &frame->promised_stream_id;
+            payload_overhead += 4; /* 32-bit Promised Stream ID field */
+        }
+
+    } else /* CONTINUATION */ {
+        frame_type = AWS_H2_FRAME_T_CONTINUATION;
+    }
+
+    /*
+     * Figure out what size header-block fragment should go in this frame.
+     */
+
+    size_t max_payload;
+    if (s_get_max_contiguous_payload_length(encoder, output, &max_payload)) {
+        goto handle_waiting_for_more_space;
+    }
+
+    size_t max_fragment;
+    if (aws_sub_size_checked(max_payload, payload_overhead, &max_fragment)) {
+        goto handle_waiting_for_more_space;
+    }
+
+    const size_t fragment_len = aws_min_size(max_fragment, frame->header_block_cursor.len);
+    if (fragment_len == frame->header_block_cursor.len) {
+        /* This will finish the header-block */
+        flags |= AWS_H2_FRAME_F_END_HEADERS;
+    } else {
+        /* If we're not finishing the header-block, is it even worth trying to send this frame now? */
+        const size_t even_worth_sending_threshold = AWS_H2_FRAME_PREFIX_SIZE + payload_overhead;
+        if (fragment_len < even_worth_sending_threshold) {
+            goto handle_waiting_for_more_space;
+        }
+    }
+
+    /*
+     * Ok, it fits! Write the frame
+     */
+    ENCODER_LOGF(
+        TRACE,
+        encoder,
+        "Encoding frame type=%s stream_id=%" PRIu32 "%s%s",
+        aws_h2_frame_type_to_str(frame_type),
+        frame->base.stream_id,
+        (flags & AWS_H2_FRAME_F_END_HEADERS) ? " END_HEADERS" : "",
+        (flags & AWS_H2_FRAME_F_END_STREAM) ? " END_STREAM" : "");
+
+    bool writes_ok = true;
+
+    /* Write the frame prefix */
+    const size_t payload_len = fragment_len + payload_overhead;
+    s_frame_prefix_encode(frame_type, frame->base.stream_id, payload_len, flags, output);
+
+    /* Write pad length */
+    if (flags & AWS_H2_FRAME_F_PADDED) {
+        AWS_ASSERT(frame_type != AWS_H2_FRAME_T_CONTINUATION);
+        writes_ok &= aws_byte_buf_write_u8(output, pad_length);
+    }
+
+    /* Write priority */
+    if (flags & AWS_H2_FRAME_F_PRIORITY) {
+        AWS_ASSERT(frame_type == AWS_H2_FRAME_T_HEADERS);
+        s_frame_priority_settings_encode(priority_settings, output);
+    }
+
+    /* Write promised stream ID */
+    if (promised_stream_id) {
+        AWS_ASSERT(frame_type == AWS_H2_FRAME_T_PUSH_PROMISE);
+        writes_ok &= aws_byte_buf_write_be32(output, *promised_stream_id);
+    }
+
+    /* Write header-block fragment */
+    if (fragment_len > 0) {
+        struct aws_byte_cursor fragment = aws_byte_cursor_advance(&frame->header_block_cursor, fragment_len);
+        writes_ok &= aws_byte_buf_write_from_whole_cursor(output, fragment);
+    }
+
+    /* Write padding */
+    if (flags & AWS_H2_FRAME_F_PADDED) {
+        writes_ok &= aws_byte_buf_write_u8_n(output, 0, pad_length);
+    }
+
+    /* Writes were bounds-checked up front, so failure here would be an internal logic error */
+    AWS_ASSERT(writes_ok);
+    (void)writes_ok;
+
+    /* Success! Wrote entire frame. It's safe to change state now */
+    frame->state =
+        flags & AWS_H2_FRAME_F_END_HEADERS ? AWS_H2_HEADERS_STATE_COMPLETE : AWS_H2_HEADERS_STATE_CONTINUATION;
+    *waiting_for_more_space = false;
+    return;
+
+handle_waiting_for_more_space:
+    ENCODER_LOGF(
+        TRACE,
+        encoder,
+        "Insufficient space to encode %s for stream %" PRIu32 " right now",
+        aws_h2_frame_type_to_str(frame->base.type),
+        frame->base.stream_id);
+    *waiting_for_more_space = true;
+}
+
+/* vtable encode() for HEADERS/PUSH_PROMISE frames.
+ * First call HPACK-encodes the whole header-block into a side buffer; every call then writes as many
+ * frames (HEADERS/PUSH_PROMISE + CONTINUATIONs) as fit in `output`. Sets *complete when the whole
+ * header-block has been emitted. Returns AWS_OP_ERR only on HPACK encoding failure. */
+static int s_frame_headers_encode(
+    struct aws_h2_frame *frame_base,
+    struct aws_h2_frame_encoder *encoder,
+    struct aws_byte_buf *output,
+    bool *complete) {
+
+    struct aws_h2_frame_headers *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_headers, base);
+
+    /* Pre-encode the entire header-block into another buffer
+     * the first time we're called. */
+    if (frame->state == AWS_H2_HEADERS_STATE_INIT) {
+        if (aws_hpack_encode_header_block(&encoder->hpack, frame->headers, &frame->whole_encoded_header_block)) {
+            ENCODER_LOGF(
+                ERROR,
+                encoder,
+                "Error doing HPACK encoding on %s of stream %" PRIu32 ": %s",
+                aws_h2_frame_type_to_str(frame->base.type),
+                frame->base.stream_id,
+                aws_error_name(aws_last_error()));
+            goto error;
+        }
+
+        frame->header_block_cursor = aws_byte_cursor_from_buf(&frame->whole_encoded_header_block);
+        frame->state = AWS_H2_HEADERS_STATE_FIRST_FRAME;
+    }
+
+    /* Write frames (HEADER or PUSH_PROMISE, followed by N CONTINUATION frames)
+     * until we're done writing header-block or the buffer is too full to continue */
+    bool waiting_for_more_space = false;
+    while (frame->state < AWS_H2_HEADERS_STATE_COMPLETE && !waiting_for_more_space) {
+        s_encode_single_header_block_frame(frame, encoder, output, &waiting_for_more_space);
+    }
+
+    *complete = frame->state == AWS_H2_HEADERS_STATE_COMPLETE;
+    return AWS_OP_SUCCESS;
+
+error:
+    return AWS_OP_ERR;
+}
+
+/***********************************************************************************************************************
+ * aws_h2_frame_prebuilt - Used by small simple frame types that we can pre-encode at the time of creation.
+ * The pre-encoded buffer is then just copied bit-by-bit during the actual "encode()" function.
+ *
+ * It's safe to pre-encode a frame if it doesn't query/mutate any external state. So PING is totally great
+ * to pre-encode, but HEADERS (which queries MAX_FRAME_SIZE and mutates the HPACK table) would be a bad candidate.
+ **********************************************************************************************************************/
+struct aws_h2_frame_prebuilt {
+    struct aws_h2_frame base;
+
+    /* The whole entire frame is pre-encoded to this buffer during construction.
+     * The buffer has the exact capacity necessary to hold the frame */
+    struct aws_byte_buf encoded_buf;
+
+    /* After construction, this cursor points to the full contents of encoded_buf.
+     * As encode() is called, we copy the contents to output and advance the cursor.*/
+    struct aws_byte_cursor cursor;
+};
+
+DEFINE_FRAME_VTABLE(prebuilt);
+
+/* Can't pre-encode a frame unless it's guaranteed to fit, regardless of current settings.
+ * Returns the minimum allowed value of the MAX_FRAME_SIZE setting (index [0] of its bounds). */
+static size_t s_prebuilt_payload_max(void) {
+    return aws_h2_settings_bounds[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE][0];
+}
+
+/* Create aws_h2_frame_prebuilt and encode frame prefix into frame->encoded_buf.
+ * Caller must encode the payload to fill the rest of the encoded_buf.
+ * Returns NULL on allocation failure. */
+static struct aws_h2_frame_prebuilt *s_h2_frame_new_prebuilt(
+    struct aws_allocator *allocator,
+    enum aws_h2_frame_type type,
+    uint32_t stream_id,
+    size_t payload_len,
+    uint8_t flags) {
+
+    AWS_PRECONDITION(payload_len <= s_prebuilt_payload_max());
+
+    const size_t encoded_frame_len = AWS_H2_FRAME_PREFIX_SIZE + payload_len;
+
+    /* Use single allocation for frame and buffer storage */
+    struct aws_h2_frame_prebuilt *frame;
+    void *storage;
+    if (!aws_mem_acquire_many(
+            allocator, 2, &frame, sizeof(struct aws_h2_frame_prebuilt), &storage, encoded_frame_len)) {
+        return NULL;
+    }
+
+    AWS_ZERO_STRUCT(*frame);
+    s_init_frame_base(&frame->base, allocator, type, &s_frame_prebuilt_vtable, stream_id);
+
+    /* encoded_buf has the exact amount of space necessary for the full encoded frame.
+     * The constructor of our subclass must finish filling up encoded_buf with the payload. */
+    frame->encoded_buf = aws_byte_buf_from_empty_array(storage, encoded_frame_len);
+
+    /* cursor points to full capacity of encoded_buf.
+     * Our subclass's constructor will finish writing the payload and fill encoded_buf to capacity.
+     * When encode() is called, we'll copy cursor's contents into available output space and advance the cursor. */
+    frame->cursor = aws_byte_cursor_from_array(storage, encoded_frame_len);
+
+    /* Write frame prefix */
+    s_frame_prefix_encode(type, stream_id, payload_len, flags, &frame->encoded_buf);
+
+    return frame;
+}
+
+/* vtable destroy() for prebuilt frames: frame and its encoded buffer share one allocation, so one release frees both */
+static void s_frame_prebuilt_destroy(struct aws_h2_frame *frame_base) {
+    aws_mem_release(frame_base->alloc, frame_base);
+}
+
+/* vtable encode() for prebuilt frames: copy as much of the pre-encoded frame as fits into `output`,
+ * advancing the progress cursor. Sets *complete once the whole frame has been copied. Always succeeds. */
+static int s_frame_prebuilt_encode(
+    struct aws_h2_frame *frame_base,
+    struct aws_h2_frame_encoder *encoder,
+    struct aws_byte_buf *output,
+    bool *complete) {
+
+    (void)encoder;
+    struct aws_h2_frame_prebuilt *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_prebuilt, base);
+
+    /* encoded_buf should have been filled to capacity during construction */
+    AWS_ASSERT(frame->encoded_buf.len == frame->encoded_buf.capacity);
+
+    /* After construction, cursor points to the full contents of encoded_buf.
+     * As encode() is called, we copy the contents to output and advance the cursor. */
+    if (frame->cursor.len == frame->encoded_buf.len) {
+        /* We haven't sent anything yet, announce start of frame */
+        ENCODER_LOGF(
+            TRACE,
+            encoder,
+            "Encoding frame type=%s stream_id=%" PRIu32,
+            aws_h2_frame_type_to_str(frame->base.type),
+            frame->base.stream_id);
+    } else {
+        /* We've already sent a bit, announce that we're resuming */
+        ENCODER_LOGF(
+            TRACE,
+            encoder,
+            "Resume encoding frame type=%s stream_id=%" PRIu32,
+            aws_h2_frame_type_to_str(frame->base.type),
+            frame->base.stream_id);
+    }
+
+    bool writes_ok = true;
+
+    /* Copy as much as we can from cursor (pre-encoded frame contents) to output.
+     * Advance the cursor to mark our progress. */
+    size_t chunk_len = aws_min_size(frame->cursor.len, output->capacity - output->len);
+    struct aws_byte_cursor chunk = aws_byte_cursor_advance(&frame->cursor, chunk_len);
+    writes_ok &= aws_byte_buf_write_from_whole_cursor(output, chunk);
+    AWS_ASSERT(writes_ok);
+    (void)writes_ok;
+
+    if (frame->cursor.len == 0) {
+        *complete = true;
+    } else {
+        ENCODER_LOGF(
+            TRACE,
+            encoder,
+            "Incomplete encoding of frame type=%s stream_id=%" PRIu32 ", will resume later...",
+            aws_h2_frame_type_to_str(frame->base.type),
+            frame->base.stream_id);
+
+        *complete = false;
+    }
+    return AWS_OP_SUCCESS;
+}
+
+/***********************************************************************************************************************
+ * PRIORITY
+ **********************************************************************************************************************/
+/* Create a pre-encoded PRIORITY frame (RFC-7540 6.3). Returns NULL on invalid stream IDs or OOM. */
+struct aws_h2_frame *aws_h2_frame_new_priority(
+    struct aws_allocator *allocator,
+    uint32_t stream_id,
+    const struct aws_h2_frame_priority_settings *priority) {
+
+    AWS_PRECONDITION(allocator);
+    AWS_PRECONDITION(priority);
+
+    if (aws_h2_validate_stream_id(stream_id) || aws_h2_validate_stream_id(priority->stream_dependency)) {
+        return NULL;
+    }
+
+    /* PRIORITY can be pre-encoded */
+    const uint8_t flags = 0;
+    const size_t payload_len = s_frame_priority_settings_size;
+
+    struct aws_h2_frame_prebuilt *frame =
+        s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_PRIORITY, stream_id, payload_len, flags);
+    if (!frame) {
+        return NULL;
+    }
+
+    /* Write the priority settings */
+    s_frame_priority_settings_encode(priority, &frame->encoded_buf);
+
+    return &frame->base;
+}
+
+/***********************************************************************************************************************
+ * RST_STREAM
+ **********************************************************************************************************************/
+static const size_t s_frame_rst_stream_length = 4; /* RST_STREAM payload is a single 32-bit error code */
+
+/* Create a pre-encoded RST_STREAM frame (RFC-7540 6.4). Returns NULL on invalid stream ID or OOM. */
+struct aws_h2_frame *aws_h2_frame_new_rst_stream(
+    struct aws_allocator *allocator,
+    uint32_t stream_id,
+    uint32_t error_code) {
+
+    if (aws_h2_validate_stream_id(stream_id)) {
+        return NULL;
+    }
+
+    /* RST_STREAM can be pre-encoded */
+    const uint8_t flags = 0;
+    const size_t payload_len = s_frame_rst_stream_length;
+
+    struct aws_h2_frame_prebuilt *frame =
+        s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_RST_STREAM, stream_id, payload_len, flags);
+    if (!frame) {
+        return NULL;
+    }
+
+    /* Write RST_STREAM payload (RFC-7540 6.4):
+     * +---------------------------------------------------------------+
+     * |                        Error Code (32)                        |
+     * +---------------------------------------------------------------+
+     */
+    bool writes_ok = true;
+    writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, error_code);
+    AWS_ASSERT(writes_ok);
+    (void)writes_ok;
+
+    return &frame->base;
+}
+
+/***********************************************************************************************************************
+ * SETTINGS
+ **********************************************************************************************************************/
+static const size_t s_frame_setting_length = 6; /* Each setting is a 16-bit ID + 32-bit value */
+
+/* Create a pre-encoded SETTINGS frame (RFC-7540 6.5).
+ * If `ack` is true, num_settings must be 0 (an ACK carries no payload).
+ * Returns NULL (raising AWS_ERROR_INVALID_ARGUMENT) on bad args, or on OOM. */
+struct aws_h2_frame *aws_h2_frame_new_settings(
+    struct aws_allocator *allocator,
+    const struct aws_http2_setting *settings_array,
+    size_t num_settings,
+    bool ack) {
+
+    AWS_PRECONDITION(settings_array || num_settings == 0);
+
+    /* Cannot send settings in an ACK frame */
+    if (ack && num_settings > 0) {
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    /* Check against insane edge case of too many settings to fit in a frame. */
+    const size_t max_settings = s_prebuilt_payload_max() / s_frame_setting_length;
+    if (num_settings > max_settings) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_ENCODER,
+            "Cannot create SETTINGS frame with %zu settings, the limit is %zu.",
+            num_settings,
+            max_settings);
+
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    /* SETTINGS can be pre-encoded */
+    const uint8_t flags = ack ? AWS_H2_FRAME_F_ACK : 0;
+    const size_t payload_len = num_settings * s_frame_setting_length;
+    const uint32_t stream_id = 0; /* SETTINGS always applies to the connection, never a stream */
+
+    struct aws_h2_frame_prebuilt *frame =
+        s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_SETTINGS, stream_id, payload_len, flags);
+    if (!frame) {
+        return NULL;
+    }
+
+    /* Write the settings, each one is encoded like (RFC-7540 6.5.1):
+     * +-------------------------------+
+     * |       Identifier (16)         |
+     * +-------------------------------+-------------------------------+
+     * |                        Value (32)                             |
+     * +---------------------------------------------------------------+
+     */
+    bool writes_ok = true;
+    for (size_t i = 0; i < num_settings; ++i) {
+        writes_ok &= aws_byte_buf_write_be16(&frame->encoded_buf, settings_array[i].id);
+        writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, settings_array[i].value);
+    }
+    AWS_ASSERT(writes_ok);
+    (void)writes_ok;
+
+    return &frame->base;
+}
+
+/***********************************************************************************************************************
+ * PING
+ **********************************************************************************************************************/
+/* Create a pre-encoded PING frame (RFC-7540 6.7) carrying 8 bytes of opaque data.
+ * PING ACKs are marked high-priority so they jump the outgoing frame queue. Returns NULL on OOM. */
+struct aws_h2_frame *aws_h2_frame_new_ping(
+    struct aws_allocator *allocator,
+    bool ack,
+    const uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE]) {
+
+    /* PING can be pre-encoded */
+    const uint8_t flags = ack ? AWS_H2_FRAME_F_ACK : 0;
+    const size_t payload_len = AWS_HTTP2_PING_DATA_SIZE;
+    const uint32_t stream_id = 0; /* PING always applies to the connection, never a stream */
+
+    struct aws_h2_frame_prebuilt *frame =
+        s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_PING, stream_id, payload_len, flags);
+    if (!frame) {
+        return NULL;
+    }
+
+    /* Write the PING payload (RFC-7540 6.7):
+     * +---------------------------------------------------------------+
+     * |                                                               |
+     * |                      Opaque Data (64)                         |
+     * |                                                               |
+     * +---------------------------------------------------------------+
+     */
+    bool writes_ok = true;
+    writes_ok &= aws_byte_buf_write(&frame->encoded_buf, opaque_data, AWS_HTTP2_PING_DATA_SIZE);
+    AWS_ASSERT(writes_ok);
+    (void)writes_ok;
+
+    /* PING responses SHOULD be given higher priority than any other frame */
+    frame->base.high_priority = ack;
+    return &frame->base;
+}
+
+/***********************************************************************************************************************
+ * GOAWAY
+ **********************************************************************************************************************/
+static const size_t s_frame_goaway_length_min = 8; /* Last-Stream-ID (4 bytes) + Error Code (4 bytes) */
+
+/* Create a pre-encoded GOAWAY frame (RFC-7540 6.8).
+ * Oversized debug-data is silently dropped (with a warning) rather than failing the GOAWAY.
+ * Returns NULL on OOM; fatally asserts if last_stream_id exceeds the 31-bit stream-ID space. */
+struct aws_h2_frame *aws_h2_frame_new_goaway(
+    struct aws_allocator *allocator,
+    uint32_t last_stream_id,
+    uint32_t error_code,
+    struct aws_byte_cursor debug_data) {
+
+    /* If debug_data is too long, don't send it.
+     * It's more important that the GOAWAY frame gets sent. */
+    const size_t debug_data_max = s_prebuilt_payload_max() - s_frame_goaway_length_min;
+    if (debug_data.len > debug_data_max) {
+        AWS_LOGF_WARN(
+            AWS_LS_HTTP_ENCODER,
+            "Sending GOAWAY without debug-data. Debug-data size %zu exceeds internal limit of %zu",
+            debug_data.len,
+            debug_data_max);
+
+        debug_data.len = 0;
+    }
+
+    /* It would be illegal to send a lower value, this is unrecoverable */
+    AWS_FATAL_ASSERT(last_stream_id <= AWS_H2_STREAM_ID_MAX);
+
+    /* GOAWAY can be pre-encoded */
+    const uint8_t flags = 0;
+    const size_t payload_len = debug_data.len + s_frame_goaway_length_min;
+    const uint32_t stream_id = 0; /* GOAWAY always applies to the connection, never a stream */
+
+    struct aws_h2_frame_prebuilt *frame =
+        s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_GOAWAY, stream_id, payload_len, flags);
+    if (!frame) {
+        return NULL;
+    }
+
+    /* Write the GOAWAY payload (RFC-7540 6.8):
+     * +-+-------------------------------------------------------------+
+     * |R|                  Last-Stream-ID (31)                        |
+     * +-+-------------------------------------------------------------+
+     * |                      Error Code (32)                          |
+     * +---------------------------------------------------------------+
+     * |                  Additional Debug Data (*)                    |
+     * +---------------------------------------------------------------+
+     */
+    bool writes_ok = true;
+    writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, last_stream_id);
+    writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, error_code);
+    writes_ok &= aws_byte_buf_write_from_whole_cursor(&frame->encoded_buf, debug_data);
+    AWS_ASSERT(writes_ok);
+    (void)writes_ok;
+
+    return &frame->base;
+}
+
+/***********************************************************************************************************************
+ * WINDOW_UPDATE
+ **********************************************************************************************************************/
+static const size_t s_frame_window_update_length = 4; /* WINDOW_UPDATE payload is one 31-bit increment field */
+
+/* Create a pre-encoded WINDOW_UPDATE frame (RFC-7540 6.9).
+ * stream_id 0 targets the connection window; non-zero targets a stream window.
+ * Returns NULL (raising AWS_ERROR_INVALID_ARGUMENT) on out-of-range args, or on OOM. */
+struct aws_h2_frame *aws_h2_frame_new_window_update(
+    struct aws_allocator *allocator,
+    uint32_t stream_id,
+    uint32_t window_size_increment) {
+
+    /* Note: stream_id may be zero or non-zero */
+    if (stream_id > AWS_H2_STREAM_ID_MAX) {
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    if (window_size_increment > AWS_H2_WINDOW_UPDATE_MAX) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_ENCODER,
+            "Window increment size %" PRIu32 " exceeds HTTP/2 max %" PRIu32,
+            window_size_increment,
+            AWS_H2_WINDOW_UPDATE_MAX);
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    /* WINDOW_UPDATE can be pre-encoded */
+    const uint8_t flags = 0;
+    const size_t payload_len = s_frame_window_update_length;
+
+    struct aws_h2_frame_prebuilt *frame =
+        s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, payload_len, flags);
+    if (!frame) {
+        return NULL;
+    }
+
+    /* Write the WINDOW_UPDATE payload (RFC-7540 6.9):
+     * +-+-------------------------------------------------------------+
+     * |R|              Window Size Increment (31)                     |
+     * +-+-------------------------------------------------------------+
+     */
+    bool writes_ok = true;
+    writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, window_size_increment);
+    AWS_ASSERT(writes_ok);
+    (void)writes_ok;
+
+    return &frame->base;
+}
+
+/* Destroy any frame via its vtable. Safe to call with NULL. */
+void aws_h2_frame_destroy(struct aws_h2_frame *frame) {
+    if (frame) {
+        frame->vtable->destroy(frame);
+    }
+}
+
+/* Encode `frame` (or as much of it as fits) into `output` via the frame's vtable.
+ * Sets *frame_complete; a partially-written frame is remembered as encoder->current_frame and must be
+ * the frame passed on the next call. An encoder that has errored once cannot be used again.
+ * Raises AWS_ERROR_INVALID_STATE on misuse; propagates vtable encode() failures. */
+int aws_h2_encode_frame(
+    struct aws_h2_frame_encoder *encoder,
+    struct aws_h2_frame *frame,
+    struct aws_byte_buf *output,
+    bool *frame_complete) {
+
+    AWS_PRECONDITION(encoder);
+    AWS_PRECONDITION(frame);
+    AWS_PRECONDITION(output);
+    AWS_PRECONDITION(frame_complete);
+
+    if (encoder->has_errored) {
+        ENCODER_LOG(ERROR, encoder, "Encoder cannot be used again after an error");
+        return aws_raise_error(AWS_ERROR_INVALID_STATE);
+    }
+
+    if (encoder->current_frame && (encoder->current_frame != frame)) {
+        ENCODER_LOG(ERROR, encoder, "Cannot encode new frame until previous frame completes");
+        return aws_raise_error(AWS_ERROR_INVALID_STATE);
+    }
+
+    *frame_complete = false;
+
+    if (frame->vtable->encode(frame, encoder, output, frame_complete)) {
+        ENCODER_LOGF(
+            ERROR,
+            encoder,
+            "Failed to encode frame type=%s stream_id=%" PRIu32 ", %s",
+            aws_h2_frame_type_to_str(frame->type),
+            frame->stream_id,
+            aws_error_name(aws_last_error()));
+        encoder->has_errored = true;
+        return AWS_OP_ERR;
+    }
+
+    encoder->current_frame = *frame_complete ? NULL : frame;
+    return AWS_OP_SUCCESS;
+}
+
+/* Apply the peer's SETTINGS_HEADER_TABLE_SIZE to this encoder's HPACK context. */
+void aws_h2_frame_encoder_set_setting_header_table_size(struct aws_h2_frame_encoder *encoder, uint32_t data) {
+    /* Setting for dynamic table size changed from peer, we will update the dynamic table size when we encode the next
+     * header block */
+    aws_hpack_encoder_update_max_table_size(&encoder->hpack, data);
+}
+
+/* Apply the peer's SETTINGS_MAX_FRAME_SIZE; caps payload size of subsequently encoded frames. */
+void aws_h2_frame_encoder_set_setting_max_frame_size(struct aws_h2_frame_encoder *encoder, uint32_t data) {
+    encoder->settings.max_frame_size = data;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/h2_stream.c b/contrib/restricted/aws/aws-c-http/source/h2_stream.c
new file mode 100644
index 00000000000..85232db0066
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h2_stream.c
@@ -0,0 +1,1321 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/h2_stream.h>
+
+#include <aws/http/private/h2_connection.h>
+#include <aws/http/private/strutil.h>
+#include <aws/http/status_code.h>
+#include <aws/io/channel.h>
+#include <aws/io/logging.h>
+#include <aws/io/stream.h>
+
+/* Apple toolchains such as xcode and swiftpm define the DEBUG symbol. undef it here so we can actually use the token */
+#undef DEBUG
+
+static void s_stream_destroy(struct aws_http_stream *stream_base);
+static void s_stream_update_window(struct aws_http_stream *stream_base, size_t increment_size);
+static int s_stream_reset_stream(struct aws_http_stream *stream_base, uint32_t http2_error);
+static int s_stream_get_received_error_code(struct aws_http_stream *stream_base, uint32_t *out_http2_error);
+static int s_stream_get_sent_error_code(struct aws_http_stream *stream_base, uint32_t *out_http2_error);
+static int s_stream_write_data(
+ struct aws_http_stream *stream_base,
+ const struct aws_http2_stream_write_data_options *options);
+
+static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static struct aws_h2err s_send_rst_and_close_stream(struct aws_h2_stream *stream, struct aws_h2err stream_error);
+static int s_stream_reset_stream_internal(struct aws_http_stream *stream_base, struct aws_h2err stream_error);
+
+/* Stream vtable wired into the base aws_http_stream for HTTP/2 streams.
+ * http1_write_chunk is NULL: chunked writes are not applicable to HTTP/2 streams. */
+struct aws_http_stream_vtable s_h2_stream_vtable = {
+    .destroy = s_stream_destroy,
+    .update_window = s_stream_update_window,
+    .activate = aws_h2_stream_activate,
+    .http1_write_chunk = NULL,
+    .http2_reset_stream = s_stream_reset_stream,
+    .http2_get_received_error_code = s_stream_get_received_error_code,
+    .http2_get_sent_error_code = s_stream_get_sent_error_code,
+    .http2_write_data = s_stream_write_data,
+};
+
+/* Human-readable name of an HTTP/2 stream state, for logging. */
+const char *aws_h2_stream_state_to_str(enum aws_h2_stream_state state) {
+    static const char *s_state_names[AWS_H2_STREAM_STATE_COUNT] = {
+        [AWS_H2_STREAM_STATE_IDLE] = "IDLE",
+        [AWS_H2_STREAM_STATE_RESERVED_LOCAL] = "RESERVED_LOCAL",
+        [AWS_H2_STREAM_STATE_RESERVED_REMOTE] = "RESERVED_REMOTE",
+        [AWS_H2_STREAM_STATE_OPEN] = "OPEN",
+        [AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL] = "HALF_CLOSED_LOCAL",
+        [AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE] = "HALF_CLOSED_REMOTE",
+        [AWS_H2_STREAM_STATE_CLOSED] = "CLOSED",
+    };
+
+    if ((unsigned)state < AWS_H2_STREAM_STATE_COUNT && s_state_names[state] != NULL) {
+        return s_state_names[state];
+    }
+
+    /* unreachable */
+    AWS_ASSERT(0);
+    return "*** UNKNOWN ***";
+}
+
+/* Map a stream back to the aws_h2_connection that owns it. */
+static struct aws_h2_connection *s_get_h2_connection(const struct aws_h2_stream *stream) {
+    struct aws_http_connection *base_connection = stream->base.owning_connection;
+    return AWS_CONTAINER_OF(base_connection, struct aws_h2_connection, base);
+}
+
+/* Acquire the stream's synced_data lock; failure asserts in debug builds. */
+static void s_lock_synced_data(struct aws_h2_stream *stream) {
+    int lock_result = aws_mutex_lock(&stream->synced_data.lock);
+    AWS_ASSERT(!lock_result && "lock failed");
+    (void)lock_result;
+}
+
+/* Release the stream's synced_data lock; failure asserts in debug builds. */
+static void s_unlock_synced_data(struct aws_h2_stream *stream) {
+    int unlock_result = aws_mutex_unlock(&stream->synced_data.lock);
+    AWS_ASSERT(!unlock_result && "unlock failed");
+    (void)unlock_result;
+}
+
+#define AWS_PRECONDITION_ON_CHANNEL_THREAD(STREAM) \
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(s_get_h2_connection(STREAM)->base.channel_slot->channel))
+
+/* For each (stream-state, frame-type) pair: may a CLIENT stream RECEIVE that frame in that state?
+ * Entries not listed default to false (zero-initialized). */
+static bool s_client_state_allows_frame_type[AWS_H2_STREAM_STATE_COUNT][AWS_H2_FRAME_TYPE_COUNT] = {
+    /* State before anything is sent or received */
+    [AWS_H2_STREAM_STATE_IDLE] = {0},
+    /* Client streams are never in reserved (local) state */
+    [AWS_H2_STREAM_STATE_RESERVED_LOCAL] = {0},
+    /* Client received push-request via PUSH_PROMISE on another stream.
+     * Waiting for push-response to start arriving on this server-initiated stream. */
+    [AWS_H2_STREAM_STATE_RESERVED_REMOTE] =
+        {
+            [AWS_H2_FRAME_T_HEADERS] = true,
+            [AWS_H2_FRAME_T_RST_STREAM] = true,
+        },
+    /* Client is sending request and has not received full response yet. */
+    [AWS_H2_STREAM_STATE_OPEN] =
+        {
+            [AWS_H2_FRAME_T_DATA] = true,
+            [AWS_H2_FRAME_T_HEADERS] = true,
+            [AWS_H2_FRAME_T_RST_STREAM] = true,
+            [AWS_H2_FRAME_T_PUSH_PROMISE] = true,
+            [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+        },
+    /* Client has sent full request (END_STREAM), but has not received full response yet. */
+    [AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL] =
+        {
+            [AWS_H2_FRAME_T_DATA] = true,
+            [AWS_H2_FRAME_T_HEADERS] = true,
+            [AWS_H2_FRAME_T_RST_STREAM] = true,
+            [AWS_H2_FRAME_T_PUSH_PROMISE] = true,
+            [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+        },
+    /* Client has received full response (END_STREAM), but is still sending request (uncommon). */
+    [AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE] =
+        {
+            [AWS_H2_FRAME_T_RST_STREAM] = true,
+            [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+        },
+    /* Full request sent (END_STREAM) and full response received (END_STREAM).
+     * OR sent RST_STREAM. OR received RST_STREAM. */
+    [AWS_H2_STREAM_STATE_CLOSED] = {0},
+};
+
+/* For each (stream-state, frame-type) pair: may a SERVER stream RECEIVE that frame in that state?
+ * Entries not listed default to false (zero-initialized). */
+static bool s_server_state_allows_frame_type[AWS_H2_STREAM_STATE_COUNT][AWS_H2_FRAME_TYPE_COUNT] = {
+    /* State before anything is sent or received, waiting for request headers to arrive and start things off */
+    [AWS_H2_STREAM_STATE_IDLE] =
+        {
+            [AWS_H2_FRAME_T_HEADERS] = true,
+        },
+    /* Server sent push-request via PUSH_PROMISE on a client-initiated stream,
+     * but hasn't started sending the push-response on this server-initiated stream yet. */
+    [AWS_H2_STREAM_STATE_RESERVED_LOCAL] =
+        {
+            [AWS_H2_FRAME_T_RST_STREAM] = true,
+            [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+        },
+    /* Server streams are never in reserved (remote) state */
+    [AWS_H2_STREAM_STATE_RESERVED_REMOTE] = {0},
+    /* Server is receiving request, and has not sent full response yet. */
+    [AWS_H2_STREAM_STATE_OPEN] =
+        {
+            [AWS_H2_FRAME_T_HEADERS] = true,
+            [AWS_H2_FRAME_T_DATA] = true,
+            [AWS_H2_FRAME_T_RST_STREAM] = true,
+            [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+        },
+    /* Server has sent full response (END_STREAM), but has not received full request yet (uncommon). */
+    [AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL] =
+        {
+            [AWS_H2_FRAME_T_HEADERS] = true,
+            [AWS_H2_FRAME_T_DATA] = true,
+            [AWS_H2_FRAME_T_RST_STREAM] = true,
+            [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+        },
+    /* Server has received full request (END_STREAM), and is still sending response. */
+    [AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE] =
+        {
+            [AWS_H2_FRAME_T_RST_STREAM] = true,
+            [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+        },
+    /* Full request received (END_STREAM) and full response sent (END_STREAM).
+     * OR sent RST_STREAM. OR received RST_STREAM. */
+    [AWS_H2_STREAM_STATE_CLOSED] = {0},
+};
+
+/* Returns the appropriate Stream Error if given frame not allowed in current state */
+/* Returns the appropriate Stream Error if given frame not allowed in current state */
+static struct aws_h2err s_check_state_allows_frame_type(
+    const struct aws_h2_stream *stream,
+    enum aws_h2_frame_type frame_type) {
+
+    AWS_PRECONDITION(frame_type < AWS_H2_FRAME_T_UNKNOWN); /* Decoder won't invoke callbacks for unknown frame types */
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+    const enum aws_h2_stream_state state = stream->thread_data.state;
+
+    /* server_data is only set on server streams; select the matching permission table */
+    bool allowed;
+    if (stream->base.server_data) {
+        allowed = s_server_state_allows_frame_type[state][frame_type];
+    } else {
+        allowed = s_client_state_allows_frame_type[state][frame_type];
+    }
+
+    if (allowed) {
+        return AWS_H2ERR_SUCCESS;
+    }
+
+    /* Determine specific error code */
+    enum aws_http2_error_code h2_error_code = AWS_HTTP2_ERR_PROTOCOL_ERROR;
+
+    /* If peer knows the state is closed, then it's a STREAM_CLOSED error */
+    if (state == AWS_H2_STREAM_STATE_CLOSED || state == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE) {
+        h2_error_code = AWS_HTTP2_ERR_STREAM_CLOSED;
+    }
+
+    AWS_H2_STREAM_LOGF(
+        ERROR,
+        stream,
+        "Malformed message, cannot receive %s frame in %s state",
+        aws_h2_frame_type_to_str(frame_type),
+        aws_h2_stream_state_to_str(state));
+
+    return aws_h2err_from_h2_code(h2_error_code);
+}
+
+/* Build a stream-level WINDOW_UPDATE frame for `increment_size` and queue it on the
+ * connection's outgoing frame list (connection takes ownership of the frame).
+ * Must run on the channel thread. Returns AWS_OP_ERR if the frame cannot be created. */
+static int s_stream_send_update_window_frame(struct aws_h2_stream *stream, size_t increment_size) {
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+    AWS_PRECONDITION(increment_size <= AWS_H2_WINDOW_UPDATE_MAX);
+
+    struct aws_h2_connection *connection = s_get_h2_connection(stream);
+    struct aws_h2_frame *stream_window_update_frame =
+        aws_h2_frame_new_window_update(stream->base.alloc, stream->base.id, (uint32_t)increment_size);
+
+    if (!stream_window_update_frame) {
+        AWS_H2_STREAM_LOGF(
+            ERROR,
+            stream,
+            "Failed to create WINDOW_UPDATE frame on connection, error %s",
+            aws_error_name(aws_last_error()));
+        return AWS_OP_ERR;
+    }
+    aws_h2_connection_enqueue_outgoing_frame(connection, stream_window_update_frame);
+
+    return AWS_OP_SUCCESS;
+}
+
+/*
+ * Create a client stream for `options->request` on an HTTP/2 connection.
+ * The stream is returned in INIT state with refcount 1; activate() later takes an
+ * additional ref for the connection. Returns NULL on failure with last-error set.
+ */
+struct aws_h2_stream *aws_h2_stream_new_request(
+    struct aws_http_connection *client_connection,
+    const struct aws_http_make_request_options *options) {
+    AWS_PRECONDITION(client_connection);
+    AWS_PRECONDITION(options);
+
+    /* NOTE(review): allocation result is not checked — presumably the aws allocator
+     * aborts on OOM (aws-c-common convention); confirm. */
+    struct aws_h2_stream *stream = aws_mem_calloc(client_connection->alloc, 1, sizeof(struct aws_h2_stream));
+
+    /* Initialize base stream */
+    stream->base.vtable = &s_h2_stream_vtable;
+    stream->base.alloc = client_connection->alloc;
+    stream->base.owning_connection = client_connection;
+    stream->base.user_data = options->user_data;
+    stream->base.on_incoming_headers = options->on_response_headers;
+    stream->base.on_incoming_header_block_done = options->on_response_header_block_done;
+    stream->base.on_incoming_body = options->on_response_body;
+    stream->base.on_complete = options->on_complete;
+    stream->base.on_destroy = options->on_destroy;
+    stream->base.client_data = &stream->base.client_or_server_data.client;
+    stream->base.client_data->response_status = AWS_HTTP_STATUS_CODE_UNKNOWN;
+    aws_linked_list_init(&stream->thread_data.outgoing_writes);
+    aws_linked_list_init(&stream->synced_data.pending_write_list);
+
+    /* Stream refcount starts at 1, and gets incremented again for the connection upon a call to activate() */
+    aws_atomic_init_int(&stream->base.refcount, 1);
+
+    /* An HTTP/1 message is converted; an HTTP/2 message is acquired as-is. */
+    enum aws_http_version message_version = aws_http_message_get_protocol_version(options->request);
+    switch (message_version) {
+        case AWS_HTTP_VERSION_1_1:
+            /* TODO: don't automatic transform HTTP/1 message. Let user explicitly pass in HTTP/2 request */
+            stream->thread_data.outgoing_message =
+                aws_http2_message_new_from_http1(stream->base.alloc, options->request);
+            if (!stream->thread_data.outgoing_message) {
+                AWS_H2_STREAM_LOG(ERROR, stream, "Stream failed to create the HTTP/2 message from HTTP/1.1 message");
+                goto error;
+            }
+            break;
+        case AWS_HTTP_VERSION_2:
+            stream->thread_data.outgoing_message = options->request;
+            aws_http_message_acquire(stream->thread_data.outgoing_message);
+            break;
+        default:
+            /* Not supported */
+            aws_raise_error(AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL);
+            goto error;
+    }
+    struct aws_byte_cursor method;
+    AWS_ZERO_STRUCT(method);
+    if (aws_http_message_get_request_method(options->request, &method)) {
+        goto error;
+    }
+    stream->base.request_method = aws_http_str_to_method(method);
+
+    /* Init H2 specific stuff */
+    stream->thread_data.state = AWS_H2_STREAM_STATE_IDLE;
+    /* stream end is implicit if the request isn't using manual data writes */
+    stream->synced_data.manual_write_ended = !options->http2_use_manual_data_writes;
+    stream->manual_write = options->http2_use_manual_data_writes;
+
+    /* if there's a request body to write, add it as the first outgoing write */
+    struct aws_input_stream *body_stream = aws_http_message_get_body_stream(options->request);
+    if (body_stream) {
+        struct aws_h2_stream_data_write *body_write =
+            aws_mem_calloc(stream->base.alloc, 1, sizeof(struct aws_h2_stream_data_write));
+        body_write->data_stream = aws_input_stream_acquire(body_stream);
+        /* with manual writes, the body stream alone does not end the stream */
+        body_write->end_stream = !stream->manual_write;
+        aws_linked_list_push_back(&stream->thread_data.outgoing_writes, &body_write->node);
+    }
+
+    /* -1 sentinels mean "no RST_STREAM sent/received yet" (see the error-code getters) */
+    stream->sent_reset_error_code = -1;
+    stream->received_reset_error_code = -1;
+    stream->synced_data.reset_error.h2_code = AWS_HTTP2_ERR_COUNT;
+    stream->synced_data.api_state = AWS_H2_STREAM_API_STATE_INIT;
+    if (aws_mutex_init(&stream->synced_data.lock)) {
+        AWS_H2_STREAM_LOGF(
+            ERROR, stream, "Mutex init error %d (%s).", aws_last_error(), aws_error_name(aws_last_error()));
+        goto error;
+    }
+    aws_channel_task_init(
+        &stream->cross_thread_work_task, s_stream_cross_thread_work_task, stream, "HTTP/2 stream cross-thread work");
+    return stream;
+error:
+    s_stream_destroy(&stream->base);
+    return NULL;
+}
+
+/*
+ * Channel task that applies user-initiated actions (window updates, reset, queued manual
+ * writes) on the channel thread. Each scheduling takes a +1 ref on the stream, released
+ * at `end` — including when the task is canceled or the stream is already closed.
+ */
+static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+    (void)task;
+
+    struct aws_h2_stream *stream = arg;
+    if (status != AWS_TASK_STATUS_RUN_READY) {
+        goto end;
+    }
+
+    struct aws_h2_connection *connection = s_get_h2_connection(stream);
+
+    if (aws_h2_stream_get_state(stream) == AWS_H2_STREAM_STATE_CLOSED) {
+        /* stream is closed, silently ignoring the requests from user */
+        AWS_H2_STREAM_LOG(
+            TRACE, stream, "Stream closed before cross thread work task runs, ignoring everything was sent by user.");
+        goto end;
+    }
+
+    /* Not sending window update at half closed remote state */
+    bool ignore_window_update = (aws_h2_stream_get_state(stream) == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE);
+    bool reset_called;
+    size_t window_update_size;
+    struct aws_h2err reset_error;
+
+    struct aws_linked_list pending_writes;
+    aws_linked_list_init(&pending_writes);
+
+    /* Copy all shared state out under the lock, then act on it lock-free below. */
+    { /* BEGIN CRITICAL SECTION */
+        s_lock_synced_data(stream);
+        stream->synced_data.is_cross_thread_work_task_scheduled = false;
+
+        /* window_update_size is ensured to be not greater than AWS_H2_WINDOW_UPDATE_MAX */
+        window_update_size = stream->synced_data.window_update_size;
+        stream->synced_data.window_update_size = 0;
+        reset_called = stream->synced_data.reset_called;
+        reset_error = stream->synced_data.reset_error;
+
+        /* copy out pending writes */
+        aws_linked_list_swap_contents(&pending_writes, &stream->synced_data.pending_write_list);
+
+        s_unlock_synced_data(stream);
+    } /* END CRITICAL SECTION */
+
+    if (window_update_size > 0 && !ignore_window_update) {
+        if (s_stream_send_update_window_frame(stream, window_update_size)) {
+            /* Treat this as a connection error */
+            aws_h2_connection_shutdown_due_to_write_err(connection, aws_last_error());
+        }
+    }
+
+    /* The largest legal value will be 2 * max window size, which is way less than INT64_MAX, so if the window_size_self
+     * overflows, remote peer will find it out. So just apply the change and ignore the possible overflow.*/
+    stream->thread_data.window_size_self += window_update_size;
+
+    if (reset_called) {
+        struct aws_h2err returned_h2err = s_send_rst_and_close_stream(stream, reset_error);
+        if (aws_h2err_failed(returned_h2err)) {
+            aws_h2_connection_shutdown_due_to_write_err(connection, returned_h2err.aws_code);
+        }
+    }
+
+    if (stream->thread_data.waiting_for_writes && !aws_linked_list_empty(&pending_writes)) {
+        /* Got more to write, move the stream back to outgoing list */
+        aws_linked_list_remove(&stream->node);
+        aws_linked_list_push_back(&connection->thread_data.outgoing_streams_list, &stream->node);
+        stream->thread_data.waiting_for_writes = false;
+    }
+    /* move any pending writes to the outgoing write queue */
+    aws_linked_list_move_all_back(&stream->thread_data.outgoing_writes, &pending_writes);
+
+    /* It's likely that frames were queued while processing cross-thread work.
+     * If so, try writing them now */
+    aws_h2_try_write_outgoing_frames(connection);
+
+end:
+    /* release the ref taken when this task was scheduled */
+    aws_http_stream_release(&stream->base);
+}
+
+/* Finish one queued data write: invoke the user's completion callback (with the
+ * given error_code), release the body stream if any, and free the write record. */
+static void s_stream_data_write_destroy(
+    struct aws_h2_stream *stream,
+    struct aws_h2_stream_data_write *write,
+    int error_code) {
+
+    AWS_PRECONDITION(stream);
+    AWS_PRECONDITION(write);
+
+    if (write->on_complete != NULL) {
+        write->on_complete(&stream->base, error_code, write->user_data);
+    }
+
+    struct aws_input_stream *body = write->data_stream;
+    if (body != NULL) {
+        aws_input_stream_release(body);
+    }
+
+    aws_mem_release(stream->base.alloc, write);
+}
+
+/* Fail and free every queued write (both thread-side and user-side lists) with
+ * AWS_ERROR_HTTP_STREAM_HAS_COMPLETED. */
+static void s_h2_stream_destroy_pending_writes(struct aws_h2_stream *stream) {
+    /**
+     * Only called when stream is not active and will never be active afterward (destroying).
+     * Under this assumption, we can safely touch `stream->synced_data.pending_write_list` without
+     * lock, as the user can only add write to the list when the stream is ACTIVE
+     */
+    AWS_ASSERT(stream->synced_data.api_state != AWS_H2_STREAM_API_STATE_ACTIVE);
+    aws_linked_list_move_all_back(
+        &stream->thread_data.outgoing_writes,
+        &stream->synced_data.pending_write_list); /* clean up any outgoing writes */
+    while (!aws_linked_list_empty(&stream->thread_data.outgoing_writes)) {
+        struct aws_linked_list_node *node = aws_linked_list_pop_front(&stream->thread_data.outgoing_writes);
+        struct aws_h2_stream_data_write *write = AWS_CONTAINER_OF(node, struct aws_h2_stream_data_write, node);
+        AWS_LOGF_DEBUG(AWS_LS_HTTP_STREAM, "Stream closing, cancelling write of stream %p", (void *)write->data_stream);
+        s_stream_data_write_destroy(stream, write, AWS_ERROR_HTTP_STREAM_HAS_COMPLETED);
+    }
+}
+
+/* vtable impl: final teardown when the refcount hits zero. Also used on the
+ * aws_h2_stream_new_request error path, so it must tolerate partial initialization.
+ * Order matters: pending writes are failed before the lock and message are released. */
+static void s_stream_destroy(struct aws_http_stream *stream_base) {
+    AWS_PRECONDITION(stream_base);
+    struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base);
+
+    s_h2_stream_destroy_pending_writes(stream);
+
+    AWS_H2_STREAM_LOG(DEBUG, stream, "Destroying stream");
+    aws_mutex_clean_up(&stream->synced_data.lock);
+    aws_http_message_release(stream->thread_data.outgoing_message);
+
+    aws_mem_release(stream->base.alloc, stream);
+}
+
+/* Complete the stream: flip api_state to COMPLETE (blocking further user writes),
+ * fail any queued writes, then invoke the user's on_complete callback with error_code. */
+void aws_h2_stream_complete(struct aws_h2_stream *stream, int error_code) {
+    { /* BEGIN CRITICAL SECTION */
+        /* clean up any pending writes */
+        s_lock_synced_data(stream);
+        /* The stream is complete now, this will prevent further writes from being queued */
+        stream->synced_data.api_state = AWS_H2_STREAM_API_STATE_COMPLETE;
+        s_unlock_synced_data(stream);
+    } /* END CRITICAL SECTION */
+
+    s_h2_stream_destroy_pending_writes(stream);
+
+    /* Invoke callback */
+    if (stream->base.on_complete) {
+        stream->base.on_complete(&stream->base, error_code, stream->base.user_data);
+    }
+}
+
+/* vtable impl: user grows the stream's flow-control receive window (manual window
+ * management only). The WINDOW_UPDATE frame itself is sent later, on the channel
+ * thread, by the cross-thread work task. An increment that would push the pending
+ * total past 2**31-1 resets the stream. */
+static void s_stream_update_window(struct aws_http_stream *stream_base, size_t increment_size) {
+    AWS_PRECONDITION(stream_base);
+    struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base);
+    struct aws_h2_connection *connection = s_get_h2_connection(stream);
+    if (!increment_size) {
+        return;
+    }
+    if (!connection->base.stream_manual_window_management) {
+        /* auto-mode, manual update window is not supported */
+        AWS_H2_STREAM_LOG(
+            DEBUG, stream, "Manual window management is off, update window operations are not supported.");
+        return;
+    }
+
+    int err = 0;
+    bool stream_is_init;
+    bool cross_thread_work_should_schedule = false;
+    size_t sum_size;
+    { /* BEGIN CRITICAL SECTION */
+        s_lock_synced_data(stream);
+
+        /* accumulate errors: size_t overflow OR exceeding the HTTP/2 window maximum */
+        err |= aws_add_size_checked(stream->synced_data.window_update_size, increment_size, &sum_size);
+        err |= sum_size > AWS_H2_WINDOW_UPDATE_MAX;
+        stream_is_init = stream->synced_data.api_state == AWS_H2_STREAM_API_STATE_INIT;
+
+        if (!err && !stream_is_init) {
+            cross_thread_work_should_schedule = !stream->synced_data.is_cross_thread_work_task_scheduled;
+            stream->synced_data.is_cross_thread_work_task_scheduled = true;
+            stream->synced_data.window_update_size = sum_size;
+        }
+        s_unlock_synced_data(stream);
+    } /* END CRITICAL SECTION */
+
+    if (cross_thread_work_should_schedule) {
+        AWS_H2_STREAM_LOG(TRACE, stream, "Scheduling stream cross-thread work task");
+        /* increment the refcount of stream to keep it alive until the task runs */
+        aws_atomic_fetch_add(&stream->base.refcount, 1);
+        aws_channel_schedule_task_now(connection->base.channel_slot->channel, &stream->cross_thread_work_task);
+        return;
+    }
+
+    if (stream_is_init) {
+        AWS_H2_STREAM_LOG(
+            ERROR,
+            stream,
+            "Stream update window failed. Stream is in initialized state, please activate the stream first.");
+        aws_raise_error(AWS_ERROR_INVALID_STATE);
+        return;
+    }
+
+    if (err) {
+        /* The increment_size is still not 100% safe, since we cannot control the incoming data frame. So just
+         * ruled out the value that is obviously wrong values */
+        AWS_H2_STREAM_LOG(
+            ERROR,
+            stream,
+            "The stream's flow-control window has been incremented beyond 2**31 -1, the max for HTTP/2. The stream "
+            "will close.");
+        aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+        struct aws_h2err stream_error = {
+            .aws_code = AWS_ERROR_OVERFLOW_DETECTED,
+            .h2_code = AWS_HTTP2_ERR_INTERNAL_ERROR,
+        };
+        /* Only when stream is not initialized reset will fail. So, we can assert it to be succeed. */
+        AWS_FATAL_ASSERT(s_stream_reset_stream_internal(stream_base, stream_error) == AWS_OP_SUCCESS);
+    }
+    return;
+}
+
+/* Record a reset request in synced_data and schedule the cross-thread work task to
+ * send the RST_STREAM on the channel thread. Idempotent: a second call after
+ * reset_called is set is logged and ignored. Fails only for an un-activated stream. */
+static int s_stream_reset_stream_internal(struct aws_http_stream *stream_base, struct aws_h2err stream_error) {
+
+    struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base);
+    struct aws_h2_connection *connection = s_get_h2_connection(stream);
+    bool reset_called;
+    bool stream_is_init;
+    bool cross_thread_work_should_schedule = false;
+
+    { /* BEGIN CRITICAL SECTION */
+        s_lock_synced_data(stream);
+
+        reset_called = stream->synced_data.reset_called;
+        stream_is_init = stream->synced_data.api_state == AWS_H2_STREAM_API_STATE_INIT;
+        if (!reset_called && !stream_is_init) {
+            cross_thread_work_should_schedule = !stream->synced_data.is_cross_thread_work_task_scheduled;
+            stream->synced_data.reset_called = true;
+            stream->synced_data.reset_error = stream_error;
+        }
+        s_unlock_synced_data(stream);
+    } /* END CRITICAL SECTION */
+
+    if (stream_is_init) {
+        AWS_H2_STREAM_LOG(
+            ERROR, stream, "Reset stream failed. Stream is in initialized state, please activate the stream first.");
+        return aws_raise_error(AWS_ERROR_INVALID_STATE);
+    }
+    if (cross_thread_work_should_schedule) {
+        AWS_H2_STREAM_LOG(TRACE, stream, "Scheduling stream cross-thread work task");
+        /* increment the refcount of stream to keep it alive until the task runs */
+        aws_atomic_fetch_add(&stream->base.refcount, 1);
+        aws_channel_schedule_task_now(connection->base.channel_slot->channel, &stream->cross_thread_work_task);
+        return AWS_OP_SUCCESS;
+    }
+    if (reset_called) {
+        AWS_H2_STREAM_LOG(DEBUG, stream, "Reset stream ignored. Reset stream has been called already.");
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+static int s_stream_reset_stream(struct aws_http_stream *stream_base, uint32_t http2_error) {
+ struct aws_h2err stream_error = {
+ .aws_code = AWS_ERROR_HTTP_RST_STREAM_SENT,
+ .h2_code = http2_error,
+ };
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: User requested RST_STREAM with error code %s (0x%x)",
+ (void *)stream_base,
+ aws_http2_error_code_to_str(http2_error),
+ http2_error);
+ return s_stream_reset_stream_internal(stream_base, stream_error);
+}
+
+static int s_stream_get_received_error_code(struct aws_http_stream *stream_base, uint32_t *out_http2_error) {
+ struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base);
+ if (stream->received_reset_error_code == -1) {
+ return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE);
+ }
+ *out_http2_error = (uint32_t)stream->received_reset_error_code;
+ return AWS_OP_SUCCESS;
+}
+
+static int s_stream_get_sent_error_code(struct aws_http_stream *stream_base, uint32_t *out_http2_error) {
+ struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base);
+ if (stream->sent_reset_error_code == -1) {
+ return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE);
+ }
+ *out_http2_error = (uint32_t)stream->sent_reset_error_code;
+ return AWS_OP_SUCCESS;
+}
+
+/* Current protocol state of the stream. thread_data is only touched on the
+ * channel thread, hence the precondition. */
+enum aws_h2_stream_state aws_h2_stream_get_state(const struct aws_h2_stream *stream) {
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+    return stream->thread_data.state;
+}
+
+/* Given a Stream Error, send RST_STREAM frame and close stream.
+ * A Connection Error is returned if something goes catastrophically wrong */
+static struct aws_h2err s_send_rst_and_close_stream(struct aws_h2_stream *stream, struct aws_h2err stream_error) {
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+    AWS_PRECONDITION(stream->thread_data.state != AWS_H2_STREAM_STATE_CLOSED);
+
+    struct aws_h2_connection *connection = s_get_h2_connection(stream);
+
+    stream->thread_data.state = AWS_H2_STREAM_STATE_CLOSED;
+    AWS_H2_STREAM_LOGF(
+        DEBUG,
+        stream,
+        "Sending RST_STREAM with error code %s (0x%x). State -> CLOSED",
+        aws_http2_error_code_to_str(stream_error.h2_code),
+        stream_error.h2_code);
+
+    /* Send RST_STREAM */
+    struct aws_h2_frame *rst_stream_frame =
+        aws_h2_frame_new_rst_stream(stream->base.alloc, stream->base.id, stream_error.h2_code);
+    /* frame allocation failure is treated as unrecoverable here */
+    AWS_FATAL_ASSERT(rst_stream_frame != NULL);
+    aws_h2_connection_enqueue_outgoing_frame(connection, rst_stream_frame); /* connection takes ownership of frame */
+    stream->sent_reset_error_code = stream_error.h2_code;
+
+    /* Tell connection that stream is now closed */
+    if (aws_h2_connection_on_stream_closed(
+            connection, stream, AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_SENT, stream_error.aws_code)) {
+        return aws_h2err_from_last_error();
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Apply a window-size delta to this stream's flow-control accounting.
+ * `self` selects our receive window; otherwise the peer's send window is adjusted.
+ * Returns FLOW_CONTROL_ERROR if the result would exceed the HTTP/2 window maximum. */
+struct aws_h2err aws_h2_stream_window_size_change(struct aws_h2_stream *stream, int32_t size_changed, bool self) {
+    if (self) {
+        /* NOTE(review): no (int64_t) cast here, unlike the peer branch below — presumably
+         * window_size_self is already a 64-bit type so the sum cannot overflow; confirm. */
+        if (stream->thread_data.window_size_self + size_changed > AWS_H2_WINDOW_UPDATE_MAX) {
+            return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR);
+        }
+        stream->thread_data.window_size_self += size_changed;
+    } else {
+        if ((int64_t)stream->thread_data.window_size_peer + size_changed > AWS_H2_WINDOW_UPDATE_MAX) {
+            return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR);
+        }
+        stream->thread_data.window_size_peer += size_changed;
+    }
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* True when at least one queued outgoing data write remains. */
+static inline bool s_h2_stream_has_outgoing_writes(struct aws_h2_stream *stream) {
+    bool queue_is_empty = aws_linked_list_empty(&stream->thread_data.outgoing_writes);
+    return !queue_is_empty;
+}
+
+/* Pop and finish the write at the head of the outgoing queue.
+ * *waiting_writes is set when that write did not end the stream and no further
+ * writes are queued — i.e. the stream must wait for more manual writes. */
+static void s_h2_stream_write_data_complete(struct aws_h2_stream *stream, bool *waiting_writes) {
+    AWS_PRECONDITION(waiting_writes);
+    AWS_PRECONDITION(s_h2_stream_has_outgoing_writes(stream));
+
+    /* finish/clean up the current write operation */
+    struct aws_linked_list_node *node = aws_linked_list_pop_front(&stream->thread_data.outgoing_writes);
+    struct aws_h2_stream_data_write *write_op = AWS_CONTAINER_OF(node, struct aws_h2_stream_data_write, node);
+    const bool ending_stream = write_op->end_stream;
+    s_stream_data_write_destroy(stream, write_op, AWS_OP_SUCCESS);
+
+    /* check to see if there are more queued writes or stream_end was called */
+    *waiting_writes = !ending_stream && !s_h2_stream_has_outgoing_writes(stream);
+}
+
+/* Peek (without removing) the write at the front of the outgoing queue. */
+static struct aws_h2_stream_data_write *s_h2_stream_get_current_write(struct aws_h2_stream *stream) {
+    AWS_PRECONDITION(s_h2_stream_has_outgoing_writes(stream));
+    struct aws_linked_list_node *front = aws_linked_list_front(&stream->thread_data.outgoing_writes);
+    return AWS_CONTAINER_OF(front, struct aws_h2_stream_data_write, node);
+}
+
+/* Body stream of the write currently at the front of the queue. */
+static struct aws_input_stream *s_h2_stream_get_data_stream(struct aws_h2_stream *stream) {
+    return s_h2_stream_get_current_write(stream)->data_stream;
+}
+
+/* Does the write at the front of the queue carry END_STREAM? */
+static bool s_h2_stream_does_current_write_end_stream(struct aws_h2_stream *stream) {
+    return s_h2_stream_get_current_write(stream)->end_stream;
+}
+
+/*
+ * Channel-thread hook run when the stream is activated: queues the HEADERS frame,
+ * initializes both flow-control windows from the negotiated settings, and moves
+ * the state machine IDLE -> OPEN (body to send) or HALF_CLOSED_LOCAL (no body).
+ * *body_state tells the connection whether/where to schedule DATA sending.
+ */
+int aws_h2_stream_on_activated(struct aws_h2_stream *stream, enum aws_h2_stream_body_state *body_state) {
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+    struct aws_h2_connection *connection = s_get_h2_connection(stream);
+
+    /* Create HEADERS frame */
+    struct aws_http_message *msg = stream->thread_data.outgoing_message;
+    /* Should be ensured when the stream is created */
+    AWS_ASSERT(aws_http_message_get_protocol_version(msg) == AWS_HTTP_VERSION_2);
+    /* If manual write, always has data to be sent. */
+    bool with_data = aws_http_message_get_body_stream(msg) != NULL || stream->manual_write;
+
+    struct aws_http_headers *h2_headers = aws_http_message_get_headers(msg);
+
+    struct aws_h2_frame *headers_frame = aws_h2_frame_new_headers(
+        stream->base.alloc,
+        stream->base.id,
+        h2_headers,
+        !with_data /* end_stream */,
+        0 /* padding - not currently configurable via public API */,
+        NULL /* priority - not currently configurable via public API */);
+
+    if (!headers_frame) {
+        AWS_H2_STREAM_LOGF(ERROR, stream, "Failed to create HEADERS frame: %s", aws_error_name(aws_last_error()));
+        goto error;
+    }
+
+    /* Initialize the flow-control window size */
+    stream->thread_data.window_size_peer =
+        connection->thread_data.settings_peer[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+    stream->thread_data.window_size_self =
+        connection->thread_data.settings_self[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+
+    if (with_data) {
+        /* If stream has DATA to send, put it in the outgoing_streams_list, and we'll send data later */
+        stream->thread_data.state = AWS_H2_STREAM_STATE_OPEN;
+        AWS_H2_STREAM_LOG(TRACE, stream, "Sending HEADERS. State -> OPEN");
+    } else {
+        /* If stream has no body, then HEADERS frame marks the end of outgoing data */
+        stream->thread_data.state = AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL;
+        AWS_H2_STREAM_LOG(TRACE, stream, "Sending HEADERS with END_STREAM. State -> HALF_CLOSED_LOCAL");
+    }
+
+    if (s_h2_stream_has_outgoing_writes(stream)) {
+        *body_state = AWS_H2_STREAM_BODY_STATE_ONGOING;
+    } else {
+        if (stream->manual_write) {
+            stream->thread_data.waiting_for_writes = true;
+            *body_state = AWS_H2_STREAM_BODY_STATE_WAITING_WRITES;
+        } else {
+            *body_state = AWS_H2_STREAM_BODY_STATE_NONE;
+        }
+    }
+    aws_h2_connection_enqueue_outgoing_frame(connection, headers_frame);
+    return AWS_OP_SUCCESS;
+
+error:
+    return AWS_OP_ERR;
+}
+
+/*
+ * Encode a DATA frame from the stream's current outgoing write into `output`,
+ * respecting both stream and connection flow-control windows.
+ * *data_encode_status reports progress (COMPLETE / ONGOING / stalled variants) so
+ * the connection can keep, park, or drop the stream from its outgoing list.
+ * Encode failures are handled as Stream Errors (RST_STREAM), not connection errors,
+ * so this still returns AWS_OP_SUCCESS in that case.
+ */
+int aws_h2_stream_encode_data_frame(
+    struct aws_h2_stream *stream,
+    struct aws_h2_frame_encoder *encoder,
+    struct aws_byte_buf *output,
+    int *data_encode_status) {
+
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+    AWS_PRECONDITION(
+        stream->thread_data.state == AWS_H2_STREAM_STATE_OPEN ||
+        stream->thread_data.state == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE);
+    struct aws_h2_connection *connection = s_get_h2_connection(stream);
+    AWS_PRECONDITION(connection->thread_data.window_size_peer > AWS_H2_MIN_WINDOW_SIZE);
+
+    if (stream->thread_data.window_size_peer <= AWS_H2_MIN_WINDOW_SIZE) {
+        /* The stream is stalled now */
+        *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING_WINDOW_STALLED;
+        return AWS_OP_SUCCESS;
+    }
+
+    *data_encode_status = AWS_H2_DATA_ENCODE_COMPLETE;
+    struct aws_input_stream *input_stream = s_h2_stream_get_data_stream(stream);
+    AWS_ASSERT(input_stream);
+
+    bool input_stream_complete = false;
+    bool input_stream_stalled = false;
+    bool ends_stream = s_h2_stream_does_current_write_end_stream(stream);
+    if (aws_h2_encode_data_frame(
+            encoder,
+            stream->base.id,
+            input_stream,
+            ends_stream,
+            0 /*pad_length*/,
+            &stream->thread_data.window_size_peer,
+            &connection->thread_data.window_size_peer,
+            output,
+            &input_stream_complete,
+            &input_stream_stalled)) {
+
+        /* Failed to write DATA, treat it as a Stream Error */
+        AWS_H2_STREAM_LOGF(ERROR, stream, "Error encoding stream DATA, %s", aws_error_name(aws_last_error()));
+        struct aws_h2err returned_h2err = s_send_rst_and_close_stream(stream, aws_h2err_from_last_error());
+        if (aws_h2err_failed(returned_h2err)) {
+            aws_h2_connection_shutdown_due_to_write_err(connection, returned_h2err.aws_code);
+        }
+        return AWS_OP_SUCCESS;
+    }
+
+    bool waiting_writes = false;
+    if (input_stream_complete) {
+        s_h2_stream_write_data_complete(stream, &waiting_writes);
+    }
+
+    /*
+     * input_stream_complete for manual writes just means the current outgoing_write is complete. The body is not
+     * complete for real until the stream is told to close
+     */
+    if (input_stream_complete && ends_stream) {
+        /* Done sending data. No more data will be sent. */
+        if (stream->thread_data.state == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE) {
+            /* Both sides have sent END_STREAM */
+            stream->thread_data.state = AWS_H2_STREAM_STATE_CLOSED;
+            AWS_H2_STREAM_LOG(TRACE, stream, "Sent END_STREAM. State -> CLOSED");
+            /* Tell connection that stream is now closed */
+            if (aws_h2_connection_on_stream_closed(
+                    connection, stream, AWS_H2_STREAM_CLOSED_WHEN_BOTH_SIDES_END_STREAM, AWS_ERROR_SUCCESS)) {
+                return AWS_OP_ERR;
+            }
+        } else {
+            /* Else can't close until we receive END_STREAM */
+            stream->thread_data.state = AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL;
+            AWS_H2_STREAM_LOG(TRACE, stream, "Sent END_STREAM. State -> HALF_CLOSED_LOCAL");
+        }
+    } else {
+        *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING;
+        if (input_stream_stalled) {
+            AWS_ASSERT(!input_stream_complete);
+            *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING_BODY_STREAM_STALLED;
+        }
+        if (stream->thread_data.window_size_peer <= AWS_H2_MIN_WINDOW_SIZE) {
+            /* if body and window both stalled, we take the window stalled status, which will take the stream out
+             * from outgoing list */
+            *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING_WINDOW_STALLED;
+        }
+        if (waiting_writes) {
+            /* if window stalled and we waiting for manual writes, we take waiting writes status, which will be handled
+             * properly if more writes coming, but windows is still stalled. But not the other way around. */
+            AWS_ASSERT(input_stream_complete);
+            *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING_WAITING_FOR_WRITES;
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Decoder callback: a HEADERS frame has begun on this stream.
+ * If the current stream state does not allow HEADERS, the stream is reset and closed. */
+struct aws_h2err aws_h2_stream_on_decoder_headers_begin(struct aws_h2_stream *stream) {
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+    const struct aws_h2err state_err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_HEADERS);
+    return aws_h2err_failed(state_err) ? s_send_rst_and_close_stream(stream, state_err) : AWS_H2ERR_SUCCESS;
+}
+
+/* Decoder callback: a single header-field within a HEADERS block arrived.
+ * Validates the block's position in the message per RFC-7540 8.1, records
+ * :status and content-length (client side only), then forwards the header to
+ * the user's on_incoming_headers callback. Malformed messages reset the
+ * stream with PROTOCOL_ERROR. */
+struct aws_h2err aws_h2_stream_on_decoder_headers_i(
+    struct aws_h2_stream *stream,
+    const struct aws_http_header *header,
+    enum aws_http_header_name name_enum,
+    enum aws_http_header_block block_type) {
+
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+    /* Not calling s_check_state_allows_frame_type() here because we already checked
+     * at start of HEADERS frame in aws_h2_stream_on_decoder_headers_begin() */
+
+    bool is_server = stream->base.server_data;
+
+    /* RFC-7540 8.1 - Message consists of:
+     * - 0+ Informational 1xx headers (response-only, decoder validates that this only occurs in responses)
+     * - 1 main headers with normal request or response.
+     * - 0 or 1 trailing headers with no pseudo-headers */
+    switch (block_type) {
+        case AWS_HTTP_HEADER_BLOCK_INFORMATIONAL:
+            if (stream->thread_data.received_main_headers) {
+                AWS_H2_STREAM_LOG(
+                    ERROR, stream, "Malformed message, received informational (1xx) response after main response");
+                goto malformed;
+            }
+            break;
+        case AWS_HTTP_HEADER_BLOCK_MAIN:
+            if (stream->thread_data.received_main_headers) {
+                AWS_H2_STREAM_LOG(ERROR, stream, "Malformed message, received second set of headers");
+                goto malformed;
+            }
+            break;
+        case AWS_HTTP_HEADER_BLOCK_TRAILING:
+            if (!stream->thread_data.received_main_headers) {
+                /* A HEADERS frame without any pseudo-headers looks like trailing headers to the decoder */
+                AWS_H2_STREAM_LOG(ERROR, stream, "Malformed headers lack required pseudo-header fields.");
+                goto malformed;
+            }
+            break;
+        default:
+            AWS_ASSERT(0);
+    }
+
+    if (is_server) {
+        /* Server-side request handling is not implemented yet. */
+        return aws_h2err_from_aws_code(AWS_ERROR_UNIMPLEMENTED);
+
+    } else {
+        /* Client */
+        switch (name_enum) {
+            case AWS_HTTP_HEADER_STATUS: {
+                uint64_t status_code = 0;
+                /* The decoder has already validated :status, so parsing cannot fail here. */
+                int err = aws_byte_cursor_utf8_parse_u64(header->value, &status_code);
+                AWS_ASSERT(!err && "Invalid :status value. Decoder should have already validated this");
+                (void)err;
+
+                stream->base.client_data->response_status = (int)status_code;
+            } break;
+            case AWS_HTTP_HEADER_CONTENT_LENGTH: {
+                /* RFC-9113 8.1.1: duplicate or unparseable content-length makes the message malformed. */
+                if (stream->thread_data.content_length_received) {
+                    AWS_H2_STREAM_LOG(ERROR, stream, "Duplicate content-length value");
+                    goto malformed;
+                }
+                if (aws_byte_cursor_utf8_parse_u64(header->value, &stream->thread_data.incoming_content_length)) {
+                    AWS_H2_STREAM_LOG(ERROR, stream, "Invalid content-length value");
+                    goto malformed;
+                }
+                stream->thread_data.content_length_received = true;
+            } break;
+            default:
+                break;
+        }
+    }
+
+    /* Deliver the header to the user callback; a callback error resets the stream. */
+    if (stream->base.on_incoming_headers) {
+        if (stream->base.on_incoming_headers(&stream->base, block_type, header, 1, stream->base.user_data)) {
+            AWS_H2_STREAM_LOGF(
+                ERROR, stream, "Incoming header callback raised error, %s", aws_error_name(aws_last_error()));
+            return s_send_rst_and_close_stream(stream, aws_h2err_from_last_error());
+        }
+    }
+
+    return AWS_H2ERR_SUCCESS;
+
+malformed:
+    /* RFC-9113 8.1.1 Malformed requests or responses that are detected MUST be treated as a stream error
+     * (Section 5.4.2) of type PROTOCOL_ERROR.*/
+    return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR));
+}
+
+/* Decoder callback: a complete header-block finished decoding.
+ * Malformed blocks reset the stream with PROTOCOL_ERROR; otherwise the
+ * block-done event is forwarded to the user's on_incoming_header_block_done
+ * callback. Completing the MAIN block sets received_main_headers, which later
+ * frames (DATA, trailers) are validated against. */
+struct aws_h2err aws_h2_stream_on_decoder_headers_end(
+    struct aws_h2_stream *stream,
+    bool malformed,
+    enum aws_http_header_block block_type) {
+
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+    /* Not calling s_check_state_allows_frame_type() here because we already checked
+     * at start of HEADERS frame in aws_h2_stream_on_decoder_headers_begin() */
+
+    if (malformed) {
+        AWS_H2_STREAM_LOG(ERROR, stream, "Headers are malformed");
+        return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR));
+    }
+
+    switch (block_type) {
+        case AWS_HTTP_HEADER_BLOCK_INFORMATIONAL:
+            AWS_H2_STREAM_LOG(TRACE, stream, "Informational 1xx header-block done.");
+            break;
+        case AWS_HTTP_HEADER_BLOCK_MAIN:
+            AWS_H2_STREAM_LOG(TRACE, stream, "Main header-block done.");
+            stream->thread_data.received_main_headers = true;
+            break;
+        case AWS_HTTP_HEADER_BLOCK_TRAILING:
+            /* Fixed copy-paste defect: this message previously said "Trailing 1xx",
+             * but trailing blocks are not informational 1xx responses. */
+            AWS_H2_STREAM_LOG(TRACE, stream, "Trailing header-block done.");
+            break;
+        default:
+            AWS_ASSERT(0);
+    }
+
+    /* Tell the user a full block completed; a callback error resets the stream. */
+    if (stream->base.on_incoming_header_block_done) {
+        if (stream->base.on_incoming_header_block_done(&stream->base, block_type, stream->base.user_data)) {
+            AWS_H2_STREAM_LOGF(
+                ERROR,
+                stream,
+                "Incoming-header-block-done callback raised error, %s",
+                aws_error_name(aws_last_error()));
+            return s_send_rst_and_close_stream(stream, aws_h2err_from_last_error());
+        }
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Decoder callback: peer sent PUSH_PROMISE naming promised_stream_id.
+ * After state validation, the promised stream is always refused. */
+struct aws_h2err aws_h2_stream_on_decoder_push_promise(struct aws_h2_stream *stream, uint32_t promised_stream_id) {
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+    const struct aws_h2err state_err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_PUSH_PROMISE);
+    if (aws_h2err_failed(state_err)) {
+        return s_send_rst_and_close_stream(stream, state_err);
+    }
+
+    /* Note: Until we have a need for it, PUSH_PROMISE is not a fully supported feature.
+     * Promised streams are automatically rejected in a manner compliant with RFC-7540. */
+    AWS_H2_STREAM_LOG(DEBUG, stream, "Automatically rejecting promised stream, PUSH_PROMISE is not fully supported");
+    struct aws_h2_connection *connection = s_get_h2_connection(stream);
+    if (aws_h2_connection_send_rst_and_close_reserved_stream(
+            connection, promised_stream_id, AWS_HTTP2_ERR_REFUSED_STREAM)) {
+        return aws_h2err_from_last_error();
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Queue a WINDOW_UPDATE frame for this stream and grow our local (self)
+ * flow-control window by the same amount. Returns AWS_OP_ERR if the frame
+ * could not be allocated. */
+static int s_stream_send_update_window(struct aws_h2_stream *stream, uint32_t window_size) {
+    struct aws_h2_frame *frame = aws_h2_frame_new_window_update(stream->base.alloc, stream->base.id, window_size);
+    if (frame == NULL) {
+        AWS_H2_STREAM_LOGF(
+            ERROR,
+            stream,
+            "WINDOW_UPDATE frame on stream failed to be sent, error %s",
+            aws_error_name(aws_last_error()));
+        return AWS_OP_ERR;
+    }
+
+    aws_h2_connection_enqueue_outgoing_frame(s_get_h2_connection(stream), frame);
+    stream->thread_data.window_size_self += window_size;
+    return AWS_OP_SUCCESS;
+}
+
+/* Decoder callback: a DATA frame began on this stream.
+ * Validates stream state and ordering (DATA must follow main HEADERS),
+ * enforces content-length accounting and the stream flow-control window,
+ * and issues automatic WINDOW_UPDATEs when the stream isn't ending. */
+struct aws_h2err aws_h2_stream_on_decoder_data_begin(
+    struct aws_h2_stream *stream,
+    uint32_t payload_len,
+    uint32_t total_padding_bytes,
+    bool end_stream) {
+
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+    struct aws_h2err stream_err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_DATA);
+    if (aws_h2err_failed(stream_err)) {
+        return s_send_rst_and_close_stream(stream, stream_err);
+    }
+
+    if (!stream->thread_data.received_main_headers) {
+        AWS_H2_STREAM_LOG(ERROR, stream, "Malformed message, received DATA before main HEADERS");
+        return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR));
+    }
+
+    if (stream->thread_data.content_length_received) {
+        /* Padding does not count toward the message body. */
+        uint64_t data_len = payload_len - total_padding_bytes;
+        if (aws_add_u64_checked(
+                stream->thread_data.incoming_data_length, data_len, &stream->thread_data.incoming_data_length)) {
+            return s_send_rst_and_close_stream(stream, aws_h2err_from_aws_code(AWS_ERROR_OVERFLOW_DETECTED));
+        }
+
+        if (stream->thread_data.incoming_data_length > stream->thread_data.incoming_content_length) {
+            /* BUGFIX: incoming_content_length is parsed as an unsigned 64-bit value
+             * (aws_byte_cursor_utf8_parse_u64), so it must be logged with PRIu64, not PRIi64 —
+             * a printf format/argument type mismatch is undefined behavior. */
+            AWS_H2_STREAM_LOGF(
+                ERROR,
+                stream,
+                "Total received data payload=%" PRIu64 " has exceed the received content-length header, which=%" PRIu64
+                ". Closing malformed stream",
+                stream->thread_data.incoming_data_length,
+                stream->thread_data.incoming_content_length);
+            return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR));
+        }
+    }
+
+    /* RFC-7540 6.9.1:
+     * The sender MUST NOT send a flow-controlled frame with a length that exceeds
+     * the space available in either of the flow-control windows advertised by the receiver.
+     * Frames with zero length with the END_STREAM flag set (that is, an empty DATA frame)
+     * MAY be sent if there is no available space in either flow-control window. */
+    if ((int32_t)payload_len > stream->thread_data.window_size_self && payload_len != 0) {
+        /* NOTE(review): window_size_self is logged with PRIi64, so it is presumably a signed
+         * 64-bit field — confirm against the stream's declaration in h2_stream.h. */
+        AWS_H2_STREAM_LOGF(
+            ERROR,
+            stream,
+            "DATA length=%" PRIu32 " exceeds flow-control window=%" PRIi64,
+            payload_len,
+            stream->thread_data.window_size_self);
+        return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR));
+    }
+    stream->thread_data.window_size_self -= payload_len;
+
+    /* If stream isn't over, we may need to send automatic window updates to keep data flowing */
+    if (!end_stream) {
+        uint32_t auto_window_update;
+        if (stream->base.owning_connection->stream_manual_window_management) {
+            /* Automatically update the flow-window to account for padding, even though "manual window management"
+             * is enabled, because the current API doesn't have any way to inform the user about padding,
+             * so we can't expect them to manage it themselves. */
+            auto_window_update = total_padding_bytes;
+        } else {
+            /* Automatically update the full amount we just received */
+            auto_window_update = payload_len;
+        }
+
+        if (auto_window_update != 0) {
+            if (s_stream_send_update_window(stream, auto_window_update)) {
+                return aws_h2err_from_last_error();
+            }
+            AWS_H2_STREAM_LOGF(
+                TRACE,
+                stream,
+                "Automatically updating stream window by %" PRIu32 "(%" PRIu32 " due to padding).",
+                auto_window_update,
+                total_padding_bytes);
+        }
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Decoder callback: a chunk of DATA payload arrived for this stream.
+ * Forwards the bytes to the user's on_incoming_body callback; a callback
+ * error resets and closes the stream. */
+struct aws_h2err aws_h2_stream_on_decoder_data_i(struct aws_h2_stream *stream, struct aws_byte_cursor data) {
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+    /* State legality was already verified when the DATA frame began
+     * (aws_h2_stream_on_decoder_data_begin()), so no re-check here. */
+
+    if (stream->base.on_incoming_body == NULL) {
+        return AWS_H2ERR_SUCCESS;
+    }
+
+    if (stream->base.on_incoming_body(&stream->base, &data, stream->base.user_data)) {
+        AWS_H2_STREAM_LOGF(
+            ERROR, stream, "Incoming body callback raised error, %s", aws_error_name(aws_last_error()));
+        return s_send_rst_and_close_stream(stream, aws_h2err_from_last_error());
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Decoder callback: peer sent WINDOW_UPDATE for this stream.
+ * Grows the peer flow-control window; sets *window_resume when the window
+ * crosses from stalled (<= AWS_H2_MIN_WINDOW_SIZE) back to usable, so the
+ * connection can return the stream to its outgoing list. */
+struct aws_h2err aws_h2_stream_on_decoder_window_update(
+    struct aws_h2_stream *stream,
+    uint32_t window_size_increment,
+    bool *window_resume) {
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+    *window_resume = false;
+
+    struct aws_h2err stream_err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_WINDOW_UPDATE);
+    if (aws_h2err_failed(stream_err)) {
+        return s_send_rst_and_close_stream(stream, stream_err);
+    }
+    if (window_size_increment == 0) {
+        /* flow-control window increment of 0 MUST be treated as error (RFC7540 6.9.1) */
+        AWS_H2_STREAM_LOG(ERROR, stream, "Window update frame with 0 increment size");
+        return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR));
+    }
+    /* NOTE(review): narrowing to int32_t looks safe because the size-change check below
+     * rejects windows above the protocol maximum (2^31-1) — confirm the declared type
+     * of window_size_peer in h2_stream.h. */
+    int32_t old_window_size = stream->thread_data.window_size_peer;
+    stream_err = (aws_h2_stream_window_size_change(stream, window_size_increment, false /*self*/));
+    if (aws_h2err_failed(stream_err)) {
+        /* We MUST NOT allow a flow-control window to exceed the max */
+        AWS_H2_STREAM_LOG(
+            ERROR, stream, "Window update frame causes the stream flow-control window to exceed the maximum size");
+        return s_send_rst_and_close_stream(stream, stream_err);
+    }
+    /* Only signal resume on the stalled -> unstalled transition, so a stream that was
+     * never stalled isn't re-added to the outgoing list. */
+    if (stream->thread_data.window_size_peer > AWS_H2_MIN_WINDOW_SIZE && old_window_size <= AWS_H2_MIN_WINDOW_SIZE) {
+        *window_resume = true;
+    }
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Decoder callback: the peer's END_STREAM flag was seen (on DATA or HEADERS).
+ * Verifies the received body length against content-length where applicable,
+ * then advances the stream state machine (HALF_CLOSED_LOCAL -> CLOSED, or
+ * -> HALF_CLOSED_REMOTE while our side is still sending). */
+struct aws_h2err aws_h2_stream_on_decoder_end_stream(struct aws_h2_stream *stream) {
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+    /* Not calling s_check_state_allows_frame_type() here because END_STREAM isn't
+     * an actual frame type. It's a flag on DATA or HEADERS frames, and we
+     * already checked the legality of those frames in their respective callbacks. */
+
+    if (stream->thread_data.content_length_received) {
+        /* NOTE(review): this reads client_data->response_status without checking that the
+         * stream is client-side; presumably only clients reach this path today because the
+         * server receive path is unimplemented — confirm. */
+        if (stream->base.request_method != AWS_HTTP_METHOD_HEAD &&
+            stream->base.client_data->response_status != AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED) {
+            /**
+             * RFC-9110 8.6.
+             * A server MAY send a Content-Length header field in a response to a HEAD request.
+             * A server MAY send a Content-Length header field in a 304 (Not Modified) response.
+             * In both of those cases no body is received, so the length check is skipped.
+             */
+            if (stream->thread_data.incoming_data_length != stream->thread_data.incoming_content_length) {
+                /**
+                 * RFC-9113 8.1.1:
+                 * A request or response is also malformed if the value of a content-length header field does not equal
+                 * the sum of the DATA frame payload lengths that form the content, unless the message is defined as
+                 * having no content.
+                 *
+                 * Clients MUST NOT accept a malformed response.
+                 */
+                /* BUGFIX: incoming_content_length is an unsigned 64-bit value (parsed with
+                 * aws_byte_cursor_utf8_parse_u64), so log it with PRIu64, not PRIi64. */
+                AWS_H2_STREAM_LOGF(
+                    ERROR,
+                    stream,
+                    "Total received data payload=%" PRIu64
+                    " does not match the received content-length header, which=%" PRIu64 ". Closing malformed stream",
+                    stream->thread_data.incoming_data_length,
+                    stream->thread_data.incoming_content_length);
+                return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR));
+            }
+        }
+    }
+
+    if (stream->thread_data.state == AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL) {
+        /* Both sides have sent END_STREAM */
+        stream->thread_data.state = AWS_H2_STREAM_STATE_CLOSED;
+        AWS_H2_STREAM_LOG(TRACE, stream, "Received END_STREAM. State -> CLOSED");
+        /* Tell connection that stream is now closed */
+        if (aws_h2_connection_on_stream_closed(
+                s_get_h2_connection(stream),
+                stream,
+                AWS_H2_STREAM_CLOSED_WHEN_BOTH_SIDES_END_STREAM,
+                AWS_ERROR_SUCCESS)) {
+            return aws_h2err_from_last_error();
+        }
+
+    } else {
+        /* Else can't close until our side sends END_STREAM */
+        stream->thread_data.state = AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE;
+        AWS_H2_STREAM_LOG(TRACE, stream, "Received END_STREAM. State -> HALF_CLOSED_REMOTE");
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Decoder callback: peer sent RST_STREAM for this stream.
+ * Transitions the stream to CLOSED and notifies the connection. Per RFC-7540
+ * 8.1, a NO_ERROR reset received after the peer's complete response is not
+ * surfaced to the client as an error. */
+struct aws_h2err aws_h2_stream_on_decoder_rst_stream(struct aws_h2_stream *stream, uint32_t h2_error_code) {
+    AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+    /* Check that this state allows RST_STREAM. */
+    struct aws_h2err err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_RST_STREAM);
+    if (aws_h2err_failed(err)) {
+        /* Usually we send a RST_STREAM when the state doesn't allow a frame type, but RFC-7540 5.4.2 says:
+         * "To avoid looping, an endpoint MUST NOT send a RST_STREAM in response to a RST_STREAM frame." */
+        return err;
+    }
+
+    /* RFC-7540 8.1 - a server MAY request that the client abort transmission of a request without error by sending a
+     * RST_STREAM with an error code of NO_ERROR after sending a complete response (i.e., a frame with the END_STREAM
+     * flag). Clients MUST NOT discard responses as a result of receiving such a RST_STREAM */
+    int aws_error_code;
+    if (stream->base.client_data && (h2_error_code == AWS_HTTP2_ERR_NO_ERROR) &&
+        (stream->thread_data.state == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE)) {
+
+        /* Benign reset: the peer already finished its side; report success to the user. */
+        aws_error_code = AWS_ERROR_SUCCESS;
+
+    } else {
+        aws_error_code = AWS_ERROR_HTTP_RST_STREAM_RECEIVED;
+        AWS_H2_STREAM_LOGF(
+            ERROR,
+            stream,
+            "Peer terminated stream with HTTP/2 RST_STREAM frame, error-code=0x%x(%s)",
+            h2_error_code,
+            aws_http2_error_code_to_str(h2_error_code));
+    }
+
+    stream->thread_data.state = AWS_H2_STREAM_STATE_CLOSED;
+    /* Record the peer's error code on the stream. */
+    stream->received_reset_error_code = h2_error_code;
+
+    AWS_H2_STREAM_LOGF(
+        TRACE,
+        stream,
+        "Received RST_STREAM code=0x%x(%s). State -> CLOSED",
+        h2_error_code,
+        aws_http2_error_code_to_str(h2_error_code));
+
+    if (aws_h2_connection_on_stream_closed(
+            s_get_h2_connection(stream), stream, AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_RECEIVED, aws_error_code)) {
+        return aws_h2err_from_last_error();
+    }
+
+    return AWS_H2ERR_SUCCESS;
+}
+
+/* Public-API path for manual DATA writes (HTTP/2 manual-write feature).
+ * May run on any thread: queues the write on the stream's lock-protected
+ * pending-write list and, if needed, schedules the cross-thread work task
+ * on the channel thread. Fails unless the stream was created with
+ * http2_use_manual_data_writes and is still active. */
+static int s_stream_write_data(
+    struct aws_http_stream *stream_base,
+    const struct aws_http2_stream_write_data_options *options) {
+    struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base);
+    if (!stream->manual_write) {
+        AWS_H2_STREAM_LOG(
+            ERROR,
+            stream,
+            "Manual writes are not enabled. You need to enable manual writes using by setting "
+            "'http2_use_manual_data_writes' to true in 'aws_http_make_request_options'");
+        return aws_raise_error(AWS_ERROR_HTTP_MANUAL_WRITE_NOT_ENABLED);
+    }
+    struct aws_h2_connection *connection = s_get_h2_connection(stream);
+
+    /* queue this new write into the pending write list for the stream */
+    struct aws_h2_stream_data_write *pending_write =
+        aws_mem_calloc(stream->base.alloc, 1, sizeof(struct aws_h2_stream_data_write));
+    if (options->data) {
+        pending_write->data_stream = aws_input_stream_acquire(options->data);
+    } else {
+        /* NULL data is allowed (e.g. to carry just the end_stream flag); substitute an empty stream. */
+        struct aws_byte_cursor empty_cursor;
+        AWS_ZERO_STRUCT(empty_cursor);
+        pending_write->data_stream = aws_input_stream_new_from_cursor(stream->base.alloc, &empty_cursor);
+    }
+    bool schedule_cross_thread_work = false;
+    { /* BEGIN CRITICAL SECTION */
+        s_lock_synced_data(stream);
+        {
+            if (stream->synced_data.api_state != AWS_H2_STREAM_API_STATE_ACTIVE) {
+                s_unlock_synced_data(stream);
+                /* Distinguish "never activated" from "already complete" for the error code. */
+                int error_code = stream->synced_data.api_state == AWS_H2_STREAM_API_STATE_INIT
+                                     ? AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED
+                                     : AWS_ERROR_HTTP_STREAM_HAS_COMPLETED;
+                s_stream_data_write_destroy(stream, pending_write, error_code);
+                AWS_H2_STREAM_LOG(ERROR, stream, "Cannot write DATA frames to an inactive or closed stream");
+                return aws_raise_error(error_code);
+            }
+
+            if (stream->synced_data.manual_write_ended) {
+                s_unlock_synced_data(stream);
+                s_stream_data_write_destroy(stream, pending_write, AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED);
+                AWS_H2_STREAM_LOG(ERROR, stream, "Cannot write DATA frames to a stream after manual write ended");
+                /* Fail with error, otherwise, people can wait for on_complete callback that will never be invoked. */
+                return aws_raise_error(AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED);
+            }
+            /* Not setting this until we're sure we succeeded, so that callback doesn't fire on cleanup if we fail */
+            if (options->end_stream) {
+                stream->synced_data.manual_write_ended = true;
+            }
+            pending_write->end_stream = options->end_stream;
+            pending_write->on_complete = options->on_complete;
+            pending_write->user_data = options->user_data;
+
+            aws_linked_list_push_back(&stream->synced_data.pending_write_list, &pending_write->node);
+            /* Only schedule the task if one isn't already pending; the flag is cleared by the task itself. */
+            schedule_cross_thread_work = !stream->synced_data.is_cross_thread_work_task_scheduled;
+            stream->synced_data.is_cross_thread_work_task_scheduled = true;
+        }
+        s_unlock_synced_data(stream);
+    } /* END CRITICAL SECTION */
+
+    if (schedule_cross_thread_work) {
+        AWS_H2_STREAM_LOG(TRACE, stream, "Scheduling stream cross-thread work task");
+        /* increment the refcount of stream to keep it alive until the task runs */
+        aws_atomic_fetch_add(&stream->base.refcount, 1);
+        aws_channel_schedule_task_now(connection->base.channel_slot->channel, &stream->cross_thread_work_task);
+    }
+
+    return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/hpack.c b/contrib/restricted/aws/aws-c-http/source/hpack.c
new file mode 100644
index 00000000000..ef3d0b3dcfa
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/hpack.c
@@ -0,0 +1,525 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/hpack.h>
+
+/* #TODO test empty strings */
+
+/* #TODO remove all OOM error handling in HTTP/2 & HPACK. make functions void if possible */
+
+/* RFC-7540 6.5.2: the default SETTINGS_HEADER_TABLE_SIZE is 4096 bytes. */
+const size_t s_hpack_dynamic_table_initial_size = 4096;
+/* Initial capacity of the dynamic table's circular buffer, in entries (not bytes). */
+const size_t s_hpack_dynamic_table_initial_elements = 512;
+/* TODO: shouldn't be a hardcoded max_size, it should be driven by SETTINGS_HEADER_TABLE_SIZE */
+const size_t s_hpack_dynamic_table_max_size = 16 * 1024 * 1024;
+
+/* Used for growing the dynamic table buffer when it fills up */
+const float s_hpack_dynamic_table_buffer_growth_rate = 1.5F;
+
+/* HPACK static header table, materialized from the shared .def file.
+ * Entries use 1-based indices (index 0 is unused), matching RFC-7541 Appendix A. */
+struct aws_http_header s_static_header_table[] = {
+#define HEADER(_index, _name) \
+    [_index] = { \
+        .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_name), \
+    },
+
+#define HEADER_WITH_VALUE(_index, _name, _value) \
+    [_index] = { \
+        .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_name), \
+        .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_value), \
+    },
+
+#include <aws/http/private/hpack_header_static_table.def>
+
+#undef HEADER
+#undef HEADER_WITH_VALUE
+};
+/* Array length, including the unused slot 0. */
+static const size_t s_static_header_table_size = AWS_ARRAY_SIZE(s_static_header_table);
+
+/* Parallel table holding just the header names, with the same 1-based indices
+ * as s_static_header_table; used for name-only reverse lookups. */
+struct aws_byte_cursor s_static_header_table_name_only[] = {
+#define HEADER(_index, _name) [_index] = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_name),
+#define HEADER_WITH_VALUE(_index, _name, _value) HEADER(_index, _name)
+
+#include <aws/http/private/hpack_header_static_table.def>
+
+#undef HEADER
+#undef HEADER_WITH_VALUE
+};
+
+/* Process-wide reverse-lookup maps, built once by aws_hpack_static_table_init(). */
+/* aws_http_header * -> size_t */
+static struct aws_hash_table s_static_header_reverse_lookup;
+/* aws_byte_cursor * -> size_t */
+static struct aws_hash_table s_static_header_reverse_lookup_name_only;
+
+/* Hash a full (name, value) header pair for the reverse-lookup tables. */
+static uint64_t s_header_hash(const void *key) {
+    const struct aws_http_header *header = key;
+    const uint64_t name_hash = aws_hash_byte_cursor_ptr(&header->name);
+    const uint64_t value_hash = aws_hash_byte_cursor_ptr(&header->value);
+    return aws_hash_combine(name_hash, value_hash);
+}
+
+/* Equality callback for the reverse-lookup tables: two headers match when
+ * both name and value compare equal byte-for-byte. */
+static bool s_header_eq(const void *a, const void *b) {
+    const struct aws_http_header *lhs = a;
+    const struct aws_http_header *rhs = b;
+    return aws_byte_cursor_eq(&lhs->name, &rhs->name) && aws_byte_cursor_eq(&lhs->value, &rhs->value);
+}
+
+/* One-time, process-wide initialization of the static-table reverse-lookup
+ * maps (header -> index and name-only -> index). Pair with
+ * aws_hpack_static_table_clean_up(). Allocation failure here is fatal. */
+void aws_hpack_static_table_init(struct aws_allocator *allocator) {
+
+    /* Capacity is size-1 because slot 0 of the 1-based static table is unused. */
+    int result = aws_hash_table_init(
+        &s_static_header_reverse_lookup,
+        allocator,
+        s_static_header_table_size - 1,
+        s_header_hash,
+        s_header_eq,
+        NULL,
+        NULL);
+    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == result);
+
+    result = aws_hash_table_init(
+        &s_static_header_reverse_lookup_name_only,
+        allocator,
+        s_static_header_table_size - 1,
+        aws_hash_byte_cursor_ptr,
+        (aws_hash_callback_eq_fn *)aws_byte_cursor_eq,
+        NULL,
+        NULL);
+    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == result);
+
+    /* Process in reverse so that name_only prefers lower indices */
+    /* (later puts overwrite earlier ones, so iterating downward makes the
+     * lowest index for a duplicated name win). */
+    for (size_t i = s_static_header_table_size - 1; i > 0; --i) {
+        /* the tables are created as 1-based indexing */
+        result = aws_hash_table_put(&s_static_header_reverse_lookup, &s_static_header_table[i], (void *)i, NULL);
+        AWS_FATAL_ASSERT(AWS_OP_SUCCESS == result);
+
+        result = aws_hash_table_put(
+            &s_static_header_reverse_lookup_name_only, &s_static_header_table_name_only[i], (void *)(i), NULL);
+        AWS_FATAL_ASSERT(AWS_OP_SUCCESS == result);
+    }
+}
+
+/* Tear down the process-wide static-table reverse-lookup maps
+ * (mirror of aws_hpack_static_table_init()).
+ * Fixed: `()` declares an unprototyped function in C — use `(void)`. */
+void aws_hpack_static_table_clean_up(void) {
+    aws_hash_table_clean_up(&s_static_header_reverse_lookup);
+    aws_hash_table_clean_up(&s_static_header_reverse_lookup_name_only);
+}
+
+/* Logging helpers that prefix every message with the hpack context's log id.
+ * HPACK_LOGF takes printf-style arguments; HPACK_LOG is the no-args variant. */
+#define HPACK_LOGF(level, hpack, text, ...) \
+    AWS_LOGF_##level((hpack)->log_subject, "id=%p [HPACK]: " text, (hpack)->log_id, __VA_ARGS__)
+#define HPACK_LOG(level, hpack, text) HPACK_LOGF(level, hpack, "%s", text)
+
+/* Initialize an hpack context: zero the struct, then set up the dynamic
+ * table's circular buffer and both of its reverse-lookup hash tables.
+ * NOTE: hash-table init return values are ignored here, consistent with the
+ * file-level TODO about removing OOM error handling. */
+void aws_hpack_context_init(
+    struct aws_hpack_context *context,
+    struct aws_allocator *allocator,
+    enum aws_http_log_subject log_subject,
+    const void *log_id) {
+
+    AWS_ZERO_STRUCT(*context);
+    context->allocator = allocator;
+    context->log_subject = log_subject;
+    context->log_id = log_id;
+
+    /* Initialize dynamic table */
+    context->dynamic_table.max_size = s_hpack_dynamic_table_initial_size;
+    context->dynamic_table.buffer_capacity = s_hpack_dynamic_table_initial_elements;
+    context->dynamic_table.buffer =
+        aws_mem_calloc(allocator, context->dynamic_table.buffer_capacity, sizeof(struct aws_http_header));
+
+    aws_hash_table_init(
+        &context->dynamic_table.reverse_lookup,
+        allocator,
+        s_hpack_dynamic_table_initial_elements,
+        s_header_hash,
+        s_header_eq,
+        NULL,
+        NULL);
+
+    aws_hash_table_init(
+        &context->dynamic_table.reverse_lookup_name_only,
+        allocator,
+        s_hpack_dynamic_table_initial_elements,
+        aws_hash_byte_cursor_ptr,
+        (aws_hash_callback_eq_fn *)aws_byte_cursor_eq,
+        NULL,
+        NULL);
+}
+
+static struct aws_http_header *s_dynamic_table_get(const struct aws_hpack_context *context, size_t index);
+
+/* Free every remaining dynamic-table entry, then the circular buffer itself.
+ * Each entry's name and value appear to share one allocation anchored at
+ * name.ptr (see s_dynamic_table_shrink), so releasing name.ptr frees both. */
+static void s_clean_up_dynamic_table_buffer(struct aws_hpack_context *context) {
+    for (; context->dynamic_table.num_elements > 0; --context->dynamic_table.num_elements) {
+        struct aws_http_header *last = s_dynamic_table_get(context, context->dynamic_table.num_elements - 1);
+        aws_mem_release(context->allocator, last->name.ptr);
+    }
+    aws_mem_release(context->allocator, context->dynamic_table.buffer);
+}
+
+/* Release everything owned by an hpack context (dynamic-table entries, the
+ * circular buffer, and both reverse-lookup tables), then zero the struct. */
+void aws_hpack_context_clean_up(struct aws_hpack_context *context) {
+    /* buffer may be NULL (e.g. after a resize to capacity 0) */
+    if (context->dynamic_table.buffer != NULL) {
+        s_clean_up_dynamic_table_buffer(context);
+    }
+
+    aws_hash_table_clean_up(&context->dynamic_table.reverse_lookup);
+    aws_hash_table_clean_up(&context->dynamic_table.reverse_lookup_name_only);
+    AWS_ZERO_STRUCT(*context);
+}
+
+/* RFC-7541 4.1: an entry's size is name length + value length + 32 bytes of overhead. */
+size_t aws_hpack_get_header_size(const struct aws_http_header *header) {
+    const size_t entry_overhead = 32;
+    return entry_overhead + header->name.len + header->value.len;
+}
+
+/* Returns the number of entries currently stored in the dynamic table. */
+size_t aws_hpack_get_dynamic_table_num_elements(const struct aws_hpack_context *context) {
+    return context->dynamic_table.num_elements;
+}
+
+/* Returns the dynamic table's current maximum size, in bytes. */
+size_t aws_hpack_get_dynamic_table_max_size(const struct aws_hpack_context *context) {
+    return context->dynamic_table.max_size;
+}
+
+/*
+ * Gets the header from the dynamic table.
+ * NOTE: This function only bounds checks on the buffer size, not the number of elements.
+ */
+static struct aws_http_header *s_dynamic_table_get(const struct aws_hpack_context *context, size_t index) {
+
+    AWS_ASSERT(index < context->dynamic_table.buffer_capacity);
+
+    /* The table is a circular buffer whose logical front lives at index_0. */
+    const size_t slot = (context->dynamic_table.index_0 + index) % context->dynamic_table.buffer_capacity;
+    return &context->dynamic_table.buffer[slot];
+}
+
+/* Resolve an HPACK index (1-based; static table first, then dynamic table) to
+ * a header. Returns NULL and raises AWS_ERROR_INVALID_INDEX for index 0 or
+ * anything out of range. */
+const struct aws_http_header *aws_hpack_get_header(const struct aws_hpack_context *context, size_t index) {
+    const size_t max_valid = s_static_header_table_size + context->dynamic_table.num_elements;
+    if (index == 0 || index >= max_valid) {
+        aws_raise_error(AWS_ERROR_INVALID_INDEX);
+        return NULL;
+    }
+
+    if (index < s_static_header_table_size) {
+        return &s_static_header_table[index];
+    }
+
+    /* Dynamic-table indices are re-based past the static table. */
+    return s_dynamic_table_get(context, index - s_static_header_table_size);
+}
+
+/* TODO: remove `bool search_value`, this option has no reason to exist */
+/* Find the best HPACK index for `header`. Returns 0 when nothing matches
+ * (0 is not a valid HPACK index). When search_value is set, exact name+value
+ * matches are preferred; *found_value reports whether the matched entry's
+ * value is non-empty. Dynamic-table hits are converted from absolute buffer
+ * positions back to relative indices and re-based past the static table. */
+size_t aws_hpack_find_index(
+    const struct aws_hpack_context *context,
+    const struct aws_http_header *header,
+    bool search_value,
+    bool *found_value) {
+
+    *found_value = false;
+
+    struct aws_hash_element *elem = NULL;
+    if (search_value) {
+        /* Check name-and-value first in static table */
+        aws_hash_table_find(&s_static_header_reverse_lookup, header, &elem);
+        if (elem) {
+            /* TODO: Maybe always set found_value to true? Who cares that the value is empty if they matched? */
+            /* If an element was found, check if it has a value */
+            *found_value = ((const struct aws_http_header *)elem->key)->value.len;
+            return (size_t)elem->value;
+        }
+        /* Check name-and-value in dynamic table */
+        aws_hash_table_find(&context->dynamic_table.reverse_lookup, header, &elem);
+        if (elem) {
+            /* TODO: Maybe always set found_value to true? Who cares that the value is empty if they matched? */
+            *found_value = ((const struct aws_http_header *)elem->key)->value.len;
+            goto trans_index_from_dynamic_table;
+        }
+    }
+    /* Check the name-only table. Note, even if we search for value, when we fail in searching for name-and-value, we
+     * should also check the name only table */
+    aws_hash_table_find(&s_static_header_reverse_lookup_name_only, &header->name, &elem);
+    if (elem) {
+        return (size_t)elem->value;
+    }
+    aws_hash_table_find(&context->dynamic_table.reverse_lookup_name_only, &header->name, &elem);
+    if (elem) {
+        goto trans_index_from_dynamic_table;
+    }
+    return 0;
+
+trans_index_from_dynamic_table:
+    AWS_ASSERT(elem);
+    size_t index;
+    /* The hash tables store absolute positions in the circular buffer; convert
+     * to an index relative to the logical front (index_0), wrapping if needed. */
+    const size_t absolute_index = (size_t)elem->value;
+    if (absolute_index >= context->dynamic_table.index_0) {
+        index = absolute_index - context->dynamic_table.index_0;
+    } else {
+        index = (context->dynamic_table.buffer_capacity - context->dynamic_table.index_0) + absolute_index;
+    }
+    /* Need to add the static table size to re-base indicies */
+    index += s_static_header_table_size;
+    return index;
+}
+
+/* Remove elements from the dynamic table until it fits in max_size bytes */
+/* Evicts from the logical back of the circular buffer (presumably the oldest
+ * entries, per HPACK eviction order — insertion code is elsewhere), keeping
+ * the byte-size accounting and both reverse-lookup maps in sync. */
+static int s_dynamic_table_shrink(struct aws_hpack_context *context, size_t max_size) {
+    while (context->dynamic_table.size > max_size && context->dynamic_table.num_elements > 0) {
+        struct aws_http_header *back = s_dynamic_table_get(context, context->dynamic_table.num_elements - 1);
+
+        /* "Remove" the header from the table */
+        context->dynamic_table.size -= aws_hpack_get_header_size(back);
+        context->dynamic_table.num_elements -= 1;
+
+        /* Remove old header from hash tables */
+        if (aws_hash_table_remove(&context->dynamic_table.reverse_lookup, back, NULL, NULL)) {
+            HPACK_LOG(ERROR, context, "Failed to remove header from the reverse lookup table");
+            goto error;
+        }
+
+        /* If the name-only lookup is pointing to the element we're removing, it needs to go.
+         * If not, it's pointing to a younger, sexier element. */
+        struct aws_hash_element *elem = NULL;
+        aws_hash_table_find(&context->dynamic_table.reverse_lookup_name_only, &back->name, &elem);
+        if (elem && elem->key == back) {
+            if (aws_hash_table_remove_element(&context->dynamic_table.reverse_lookup_name_only, elem)) {
+                HPACK_LOG(ERROR, context, "Failed to remove header from the reverse lookup (name-only) table");
+                goto error;
+            }
+        }
+
+        /* clean up the memory we allocated to hold the name and value string*/
+        aws_mem_release(context->allocator, back->name.ptr);
+    }
+
+    return AWS_OP_SUCCESS;
+
+error:
+    return AWS_OP_ERR;
+}
+
+/*
+ * Resizes the dynamic table storage buffer to new_max_elements.
+ * Useful when inserting over capacity, or when downsizing.
+ * Do shrink first, if you want to remove elements, or memory leak will happen.
+ */
+static int s_dynamic_table_resize_buffer(struct aws_hpack_context *context, size_t new_max_elements) {
+
+ /* Clear the old hash tables */
+ aws_hash_table_clear(&context->dynamic_table.reverse_lookup);
+ aws_hash_table_clear(&context->dynamic_table.reverse_lookup_name_only);
+
+ struct aws_http_header *new_buffer = NULL;
+
+ if (AWS_UNLIKELY(new_max_elements == 0)) {
+ /* If new buffer is of size 0, don't both initializing, just clean up the old one. */
+ goto cleanup_old_buffer;
+ }
+
+ /* Allocate the new buffer */
+ new_buffer = aws_mem_calloc(context->allocator, new_max_elements, sizeof(struct aws_http_header));
+ if (!new_buffer) {
+ return AWS_OP_ERR;
+ }
+
+ /* Don't bother copying data if old buffer was of size 0 */
+ if (AWS_UNLIKELY(context->dynamic_table.num_elements == 0)) {
+ goto reset_dyn_table_state;
+ }
+
+ /*
+ * Take a buffer that looks like this:
+ *
+ * Index 0
+ * ^
+ * +---------------------------+
+ * | Below Block | Above Block |
+ * +---------------------------+
+ * And make it look like this:
+ *
+ * Index 0
+ * ^
+ * +-------------+-------------+
+ * | Above Block | Below Block |
+ * +-------------+-------------+
+ */
+
+ /* Copy as much the above block as possible */
+ size_t above_block_size = context->dynamic_table.buffer_capacity - context->dynamic_table.index_0;
+ if (above_block_size > new_max_elements) {
+ above_block_size = new_max_elements;
+ }
+ memcpy(
+ new_buffer,
+ context->dynamic_table.buffer + context->dynamic_table.index_0,
+ above_block_size * sizeof(struct aws_http_header));
+
+ /* Copy as much of below block as possible */
+ const size_t free_blocks_available = new_max_elements - above_block_size;
+ const size_t old_blocks_to_copy = context->dynamic_table.buffer_capacity - above_block_size;
+ const size_t below_block_size = aws_min_size(free_blocks_available, old_blocks_to_copy);
+ if (below_block_size) {
+ memcpy(
+ new_buffer + above_block_size,
+ context->dynamic_table.buffer,
+ below_block_size * sizeof(struct aws_http_header));
+ }
+
+ /* Free the old memory */
+cleanup_old_buffer:
+ aws_mem_release(context->allocator, context->dynamic_table.buffer);
+
+ /* Reset state */
+reset_dyn_table_state:
+ if (context->dynamic_table.num_elements > new_max_elements) {
+ context->dynamic_table.num_elements = new_max_elements;
+ }
+ context->dynamic_table.buffer_capacity = new_max_elements;
+ context->dynamic_table.index_0 = 0;
+ context->dynamic_table.buffer = new_buffer;
+
+ /* Re-insert all of the reverse lookup elements */
+ for (size_t i = 0; i < context->dynamic_table.num_elements; ++i) {
+ if (aws_hash_table_put(
+ &context->dynamic_table.reverse_lookup, &context->dynamic_table.buffer[i], (void *)i, NULL)) {
+ return AWS_OP_ERR;
+ }
+ if (aws_hash_table_put(
+ &context->dynamic_table.reverse_lookup_name_only,
+ &context->dynamic_table.buffer[i].name,
+ (void *)i,
+ NULL)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Insert a header at the logical front (index 0) of the dynamic table (RFC-7541 4.1/4.4),
+ * evicting old entries and/or growing the ring buffer as needed.
+ * The name and value bytes are copied into a single allocation owned by the table
+ * entry (pointer kept in the entry's name.ptr; freed on eviction/clean-up).
+ * Returns AWS_OP_ERR on failure; callers treat that as fatal for the connection. */
+int aws_hpack_insert_header(struct aws_hpack_context *context, const struct aws_http_header *header) {
+
+    /* Don't move forward if no elements allowed in the dynamic table */
+    if (AWS_UNLIKELY(context->dynamic_table.max_size == 0)) {
+        return AWS_OP_SUCCESS;
+    }
+
+    const size_t header_size = aws_hpack_get_header_size(header);
+
+    /* If for whatever reason this new header is bigger than the total table size, burn everything to the ground. */
+    if (AWS_UNLIKELY(header_size > context->dynamic_table.max_size)) {
+        /* #TODO handle this. It's not an error. It should simply result in an empty table RFC-7541 4.4 */
+        goto error;
+    }
+
+    /* Rotate out headers until there's room for the new header (this function will return immediately if nothing needs
+     * to be evicted) */
+    if (s_dynamic_table_shrink(context, context->dynamic_table.max_size - header_size)) {
+        goto error;
+    }
+
+    /* If we're out of space in the buffer, grow it */
+    if (context->dynamic_table.num_elements == context->dynamic_table.buffer_capacity) {
+        /* If the buffer is currently of 0 size, reset it back to its initial size */
+        const size_t new_size =
+            context->dynamic_table.buffer_capacity
+                ? (size_t)(context->dynamic_table.buffer_capacity * s_hpack_dynamic_table_buffer_growth_rate)
+                : s_hpack_dynamic_table_initial_elements;
+
+        if (s_dynamic_table_resize_buffer(context, new_size)) {
+            goto error;
+        }
+    }
+
+    /* Decrement index 0, wrapping if necessary */
+    if (context->dynamic_table.index_0 == 0) {
+        context->dynamic_table.index_0 = context->dynamic_table.buffer_capacity - 1;
+    } else {
+        context->dynamic_table.index_0--;
+    }
+
+    /* Increment num_elements */
+    context->dynamic_table.num_elements++;
+    /* Increment the size */
+    context->dynamic_table.size += header_size;
+
+    /* Put the header at the "front" of the table */
+    struct aws_http_header *table_header = s_dynamic_table_get(context, 0);
+
+    /* TODO:: We can optimize this with ring buffer. */
+    /* allocate memory for the name and value, which will be deallocated whenever the entry is evicted from the table or
+     * the table is cleaned up. We keep the pointer in the name pointer of each entry */
+    const size_t buf_memory_size = header->name.len + header->value.len;
+
+    if (buf_memory_size) {
+        uint8_t *buf_memory = aws_mem_acquire(context->allocator, buf_memory_size);
+        if (!buf_memory) {
+            /* NOTE(review): size/num_elements/index_0 were already updated above, so the
+             * table is left inconsistent here; per the comment at the error label, any
+             * failure is expected to tear down the whole connection. */
+            return AWS_OP_ERR;
+        }
+        struct aws_byte_buf buf = aws_byte_buf_from_empty_array(buf_memory, buf_memory_size);
+        /* Copy header, then backup strings into our own allocation */
+        *table_header = *header;
+        aws_byte_buf_append_and_update(&buf, &table_header->name);
+        aws_byte_buf_append_and_update(&buf, &table_header->value);
+    } else {
+        /* if buf_memory_size is 0, no memory needed, we will insert the empty header into dynamic table */
+        *table_header = *header;
+        table_header->name.ptr = NULL;
+        table_header->value.ptr = NULL;
+    }
+    /* Write the new header to the look up tables */
+    if (aws_hash_table_put(
+            &context->dynamic_table.reverse_lookup, table_header, (void *)context->dynamic_table.index_0, NULL)) {
+        goto error;
+    }
+    /* Note that we can just blindly put here, we want to overwrite any older entry so it isn't accidentally removed. */
+    if (aws_hash_table_put(
+            &context->dynamic_table.reverse_lookup_name_only,
+            &table_header->name,
+            (void *)context->dynamic_table.index_0,
+            NULL)) {
+        goto error;
+    }
+
+    return AWS_OP_SUCCESS;
+
+error:
+    /* Do not attempt to handle the error, if something goes wrong, close the connection */
+    return AWS_OP_ERR;
+}
+
+/* Apply a new maximum size to the dynamic table (RFC-7541 4.2/6.3):
+ * evict entries until the contents fit, then shrink the ring buffer to exactly
+ * the surviving element count. Sizes above the library-wide cap raise
+ * AWS_ERROR_OVERFLOW_DETECTED. Returns AWS_OP_SUCCESS or AWS_OP_ERR. */
+int aws_hpack_resize_dynamic_table(struct aws_hpack_context *context, size_t new_max_size) {
+
+    /* Nothing to see here! */
+    if (new_max_size == context->dynamic_table.max_size) {
+        return AWS_OP_SUCCESS;
+    }
+
+    if (new_max_size > s_hpack_dynamic_table_max_size) {
+
+        HPACK_LOGF(
+            ERROR,
+            context,
+            "New dynamic table max size %zu is greater than the supported max size (%zu)",
+            new_max_size,
+            s_hpack_dynamic_table_max_size);
+        aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+        goto error;
+    }
+
+    /* If downsizing, remove elements until we're within the new size constraints */
+    if (s_dynamic_table_shrink(context, new_max_size)) {
+        goto error;
+    }
+
+    /* Resize the buffer to the current size */
+    if (s_dynamic_table_resize_buffer(context, context->dynamic_table.num_elements)) {
+        goto error;
+    }
+
+    /* Update the max size */
+    context->dynamic_table.max_size = new_max_size;
+
+    return AWS_OP_SUCCESS;
+
+error:
+    return AWS_OP_ERR;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c b/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
new file mode 100644
index 00000000000..936cd8d4f57
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
@@ -0,0 +1,446 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/hpack.h>
+
+/* Logging helpers: tag every HPACK decoder log line with the owner's log_id. */
+#define HPACK_LOGF(level, decoder, text, ...)                                                                          \
+    AWS_LOGF_##level(AWS_LS_HTTP_DECODER, "id=%p [HPACK]: " text, (decoder)->log_id, __VA_ARGS__)
+#define HPACK_LOG(level, decoder, text) HPACK_LOGF(level, decoder, "%s", text)
+
+/* Shared static Huffman code table; defined elsewhere in this library. */
+struct aws_huffman_symbol_coder *hpack_get_coder(void);
+
+/* Used while decoding the header name & value, grows if necessary */
+const size_t s_hpack_decoder_scratch_initial_size = 512;
+
+/* Zero-initialize a decoder, wire up its Huffman decoder, HPACK context and
+ * scratch buffer, and seed the protocol max-table-size from the context default.
+ * log_id is only used to tag log output. */
+void aws_hpack_decoder_init(struct aws_hpack_decoder *decoder, struct aws_allocator *allocator, const void *log_id) {
+    AWS_ZERO_STRUCT(*decoder);
+    decoder->log_id = log_id;
+
+    aws_huffman_decoder_init(&decoder->huffman_decoder, hpack_get_coder());
+    aws_huffman_decoder_allow_growth(&decoder->huffman_decoder, true);
+
+    aws_hpack_context_init(&decoder->context, allocator, AWS_LS_HTTP_DECODER, log_id);
+
+    aws_byte_buf_init(&decoder->progress_entry.scratch, allocator, s_hpack_decoder_scratch_initial_size);
+
+    decoder->dynamic_table_protocol_max_size_setting = aws_hpack_get_dynamic_table_max_size(&decoder->context);
+}
+
+/* Release everything the decoder owns (scratch buffer, HPACK context),
+ * then wipe the struct so accidental reuse is easy to spot. */
+void aws_hpack_decoder_clean_up(struct aws_hpack_decoder *decoder) {
+    aws_byte_buf_clean_up(&decoder->progress_entry.scratch);
+    aws_hpack_context_clean_up(&decoder->context);
+    AWS_ZERO_STRUCT(*decoder);
+}
+
+/* Look up a header by its u64 wire index, guarding the narrowing cast to size_t.
+ * NOTE(review): on 64-bit targets SIZE_MAX == UINT64_MAX, so the guard can never
+ * fire there (it only matters on 32-bit); verify no tautological-compare warning. */
+static const struct aws_http_header *s_get_header_u64(const struct aws_hpack_decoder *decoder, uint64_t index) {
+    if (index > SIZE_MAX) {
+        HPACK_LOG(ERROR, decoder, "Header index is absurdly large");
+        aws_raise_error(AWS_ERROR_INVALID_INDEX);
+        return NULL;
+    }
+
+    return aws_hpack_get_header(&decoder->context, (size_t)index);
+}
+
+/* Record the protocol-level cap (e.g. from SETTINGS) that any future
+ * Dynamic Table Size Update entry must not exceed (checked in aws_hpack_decode). */
+void aws_hpack_decoder_update_max_table_size(struct aws_hpack_decoder *decoder, uint32_t setting_max_size) {
+    decoder->dynamic_table_protocol_max_size_setting = setting_max_size;
+}
+
+/* Return a byte with the N right-most bits set.
+ * Ex: 2 -> 00000011 */
+static uint8_t s_masked_right_bits_u8(uint8_t num_masked_bits) {
+    AWS_ASSERT(num_masked_bits <= 8);
+    /* (1 << N) - 1 sets exactly the N low bits; the arithmetic happens in
+     * unsigned int, so N == 8 is well-defined and yields 0xFF, N == 0 yields 0. */
+    return (uint8_t)((1U << num_masked_bits) - 1U);
+}
+
+/* Decode an HPACK prefixed integer (RFC-7541 5.1) incrementally.
+ * Consumes bytes from to_decode; if input runs out mid-integer, returns success
+ * with *complete = false and resumes from decoder->progress_integer next call.
+ * Raises AWS_ERROR_OVERFLOW_DETECTED if the value would not fit in 64 bits. */
+int aws_hpack_decode_integer(
+    struct aws_hpack_decoder *decoder,
+    struct aws_byte_cursor *to_decode,
+    uint8_t prefix_size,
+    uint64_t *integer,
+    bool *complete) {
+
+    AWS_PRECONDITION(decoder);
+    AWS_PRECONDITION(to_decode);
+    AWS_PRECONDITION(prefix_size <= 8);
+    AWS_PRECONDITION(integer);
+
+    const uint8_t prefix_mask = s_masked_right_bits_u8(prefix_size);
+
+    struct hpack_progress_integer *progress = &decoder->progress_integer;
+
+    while (to_decode->len) {
+        switch (progress->state) {
+            case HPACK_INTEGER_STATE_INIT: {
+                /* Read the first byte, and check whether this is it, or we need to continue */
+                uint8_t byte = 0;
+                bool succ = aws_byte_cursor_read_u8(to_decode, &byte);
+                AWS_FATAL_ASSERT(succ);
+
+                /* Cut the prefix */
+                byte &= prefix_mask;
+
+                /* No matter what, the first byte's value is always added to the integer */
+                *integer = byte;
+
+                /* A prefix of all 1s means continuation bytes follow (RFC-7541 5.1) */
+                if (byte != prefix_mask) {
+                    goto handle_complete;
+                }
+
+                progress->state = HPACK_INTEGER_STATE_VALUE;
+            } break;
+
+            case HPACK_INTEGER_STATE_VALUE: {
+                uint8_t byte = 0;
+                bool succ = aws_byte_cursor_read_u8(to_decode, &byte);
+                AWS_FATAL_ASSERT(succ);
+
+                /* Low 7 bits carry the payload, shifted into place; detect u64 wrap-around */
+                uint64_t new_byte_value = (uint64_t)(byte & 127) << progress->bit_count;
+                if (*integer + new_byte_value < *integer) {
+                    return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+                }
+                *integer += new_byte_value;
+
+                /* Check if we're done (high bit clear means final continuation byte) */
+                if ((byte & 128) == 0) {
+                    goto handle_complete;
+                }
+
+                /* Increment the bit count */
+                progress->bit_count += 7;
+
+                /* 7 Bits are expected to be used, so if we get to the point where any of
+                 * those bits can't be used it's a decoding error */
+                if (progress->bit_count > 64 - 7) {
+                    return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+                }
+            } break;
+        }
+    }
+
+    /* Fell out of data loop, must need more data */
+    *complete = false;
+    return AWS_OP_SUCCESS;
+
+handle_complete:
+    AWS_ZERO_STRUCT(decoder->progress_integer);
+    *complete = true;
+    return AWS_OP_SUCCESS;
+}
+
+/* Decode an HPACK string literal (RFC-7541 5.2) incrementally into output.
+ * Handles both plain and Huffman-coded payloads (H bit of the first byte).
+ * If input runs out mid-string, returns success with *complete = false and
+ * resumes from decoder->progress_string on the next call. */
+int aws_hpack_decode_string(
+    struct aws_hpack_decoder *decoder,
+    struct aws_byte_cursor *to_decode,
+    struct aws_byte_buf *output,
+    bool *complete) {
+
+    AWS_PRECONDITION(decoder);
+    AWS_PRECONDITION(to_decode);
+    AWS_PRECONDITION(output);
+    AWS_PRECONDITION(complete);
+
+    struct hpack_progress_string *progress = &decoder->progress_string;
+
+    while (to_decode->len) {
+        switch (progress->state) {
+            case HPACK_STRING_STATE_INIT: {
+                /* Do init stuff */
+                progress->state = HPACK_STRING_STATE_LENGTH;
+                progress->use_huffman = *to_decode->ptr >> 7;
+                aws_huffman_decoder_reset(&decoder->huffman_decoder);
+                /* fallthrough, since we didn't consume any data */
+            }
+            /* FALLTHRU */
+            case HPACK_STRING_STATE_LENGTH: {
+                bool length_complete = false;
+                if (aws_hpack_decode_integer(decoder, to_decode, 7, &progress->length, &length_complete)) {
+                    return AWS_OP_ERR;
+                }
+
+                if (!length_complete) {
+                    goto handle_ongoing;
+                }
+
+                if (progress->length == 0) {
+                    goto handle_complete;
+                }
+
+                /* NOTE(review): tautological on 64-bit (SIZE_MAX == UINT64_MAX); only guards 32-bit. */
+                if (progress->length > SIZE_MAX) {
+                    return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+                }
+
+                progress->state = HPACK_STRING_STATE_VALUE;
+            } break;
+
+            case HPACK_STRING_STATE_VALUE: {
+                /* Take either as much data as we need, or as much as we can */
+                size_t to_process = aws_min_size((size_t)progress->length, to_decode->len);
+                progress->length -= to_process;
+
+                struct aws_byte_cursor chunk = aws_byte_cursor_advance(to_decode, to_process);
+
+                if (progress->use_huffman) {
+                    if (aws_huffman_decode(&decoder->huffman_decoder, &chunk, output)) {
+                        HPACK_LOGF(ERROR, decoder, "Error from Huffman decoder: %s", aws_error_name(aws_last_error()));
+                        return AWS_OP_ERR;
+                    }
+
+                    /* Decoder should consume all bytes we feed it.
+                     * EOS (end-of-string) symbol could stop it early, but HPACK says to treat EOS as error. */
+                    if (chunk.len != 0) {
+                        HPACK_LOG(ERROR, decoder, "Huffman encoded end-of-string symbol is illegal");
+                        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+                    }
+                } else {
+                    if (aws_byte_buf_append_dynamic(output, &chunk)) {
+                        return AWS_OP_ERR;
+                    }
+                }
+
+                /* If whole length consumed, we're done */
+                if (progress->length == 0) {
+                    /* #TODO Validate any padding bits left over in final byte of string.
+                     * "A padding not corresponding to the most significant bits of the
+                     * code for the EOS symbol MUST be treated as a decoding error" */
+
+                    /* #TODO impose limits on string length */
+
+                    goto handle_complete;
+                }
+            } break;
+        }
+    }
+
+handle_ongoing:
+    /* Fell out of to_decode loop, must still be in progress */
+    AWS_ASSERT(to_decode->len == 0);
+    *complete = false;
+    return AWS_OP_SUCCESS;
+
+handle_complete:
+    AWS_ASSERT(decoder->progress_string.length == 0);
+    AWS_ZERO_STRUCT(decoder->progress_string);
+    *complete = true;
+    return AWS_OP_SUCCESS;
+}
+
+/* Implements RFC-7541 Section 6 - Binary Format */
+/* Top-level incremental HPACK entry decoder. Consumes bytes from to_decode and
+ * reports via result either a complete header-field, a dynamic-table resize, or
+ * ONGOING when the cursor is exhausted mid-entry; state persists in
+ * decoder->progress_entry between calls. Returns AWS_OP_ERR on decode errors. */
+int aws_hpack_decode(
+    struct aws_hpack_decoder *decoder,
+    struct aws_byte_cursor *to_decode,
+    struct aws_hpack_decode_result *result) {
+
+    AWS_PRECONDITION(decoder);
+    AWS_PRECONDITION(to_decode);
+    AWS_PRECONDITION(result);
+
+    /* Run state machine until we decode a complete entry.
+     * Every state requires data, so we can simply loop until no more data available. */
+    while (to_decode->len) {
+        switch (decoder->progress_entry.state) {
+
+            case HPACK_ENTRY_STATE_INIT: {
+                /* Reset entry */
+                AWS_ZERO_STRUCT(decoder->progress_entry.u);
+                decoder->progress_entry.scratch.len = 0;
+
+                /* Determine next state by looking at first few bits of the next byte:
+                 * 1xxxxxxx: Indexed Header Field Representation
+                 * 01xxxxxx: Literal Header Field with Incremental Indexing
+                 * 001xxxxx: Dynamic Table Size Update
+                 * 0001xxxx: Literal Header Field Never Indexed
+                 * 0000xxxx: Literal Header Field without Indexing */
+                uint8_t first_byte = to_decode->ptr[0];
+                if (first_byte & (1 << 7)) {
+                    /* 1xxxxxxx: Indexed Header Field Representation */
+                    decoder->progress_entry.state = HPACK_ENTRY_STATE_INDEXED;
+
+                } else if (first_byte & (1 << 6)) {
+                    /* 01xxxxxx: Literal Header Field with Incremental Indexing */
+                    decoder->progress_entry.u.literal.compression = AWS_HTTP_HEADER_COMPRESSION_USE_CACHE;
+                    decoder->progress_entry.u.literal.prefix_size = 6;
+                    decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_BEGIN;
+
+                } else if (first_byte & (1 << 5)) {
+                    /* 001xxxxx: Dynamic Table Size Update */
+                    decoder->progress_entry.state = HPACK_ENTRY_STATE_DYNAMIC_TABLE_RESIZE;
+
+                } else if (first_byte & (1 << 4)) {
+                    /* 0001xxxx: Literal Header Field Never Indexed */
+                    decoder->progress_entry.u.literal.compression = AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE;
+                    decoder->progress_entry.u.literal.prefix_size = 4;
+                    decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_BEGIN;
+                } else {
+                    /* 0000xxxx: Literal Header Field without Indexing */
+                    decoder->progress_entry.u.literal.compression = AWS_HTTP_HEADER_COMPRESSION_NO_CACHE;
+                    decoder->progress_entry.u.literal.prefix_size = 4;
+                    decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_BEGIN;
+                }
+            } break;
+
+            /* RFC-7541 6.1. Indexed Header Field Representation.
+             * Decode one integer, which is an index into the table.
+             * Result is the header name and value stored there. */
+            case HPACK_ENTRY_STATE_INDEXED: {
+                bool complete = false;
+                uint64_t *index = &decoder->progress_entry.u.indexed.index;
+                if (aws_hpack_decode_integer(decoder, to_decode, 7, index, &complete)) {
+                    return AWS_OP_ERR;
+                }
+
+                if (!complete) {
+                    break;
+                }
+
+                const struct aws_http_header *header = s_get_header_u64(decoder, *index);
+                if (!header) {
+                    return AWS_OP_ERR;
+                }
+
+                result->type = AWS_HPACK_DECODE_T_HEADER_FIELD;
+                result->data.header_field = *header;
+                goto handle_complete;
+            } break;
+
+            /* RFC-7541 6.2. Literal Header Field Representation.
+             * We use multiple states to decode a literal...
+             * The header-name MAY come from the table and MAY be encoded as a string.
+             * The header-value is ALWAYS encoded as a string.
+             *
+             * This BEGIN state decodes one integer.
+             * If it's non-zero, then it's the index in the table where we'll get the header-name from.
+             * If it's zero, then we move to the HEADER_NAME state and decode header-name as a string instead */
+            case HPACK_ENTRY_STATE_LITERAL_BEGIN: {
+                struct hpack_progress_literal *literal = &decoder->progress_entry.u.literal;
+
+                bool index_complete = false;
+                if (aws_hpack_decode_integer(
+                        decoder, to_decode, literal->prefix_size, &literal->name_index, &index_complete)) {
+                    return AWS_OP_ERR;
+                }
+
+                if (!index_complete) {
+                    break;
+                }
+
+                if (literal->name_index == 0) {
+                    /* Index 0 means header-name is not in table. Need to decode header-name as a string instead */
+                    decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_NAME_STRING;
+                    break;
+                }
+
+                /* Otherwise we found index of header-name in table. */
+                const struct aws_http_header *header = s_get_header_u64(decoder, literal->name_index);
+                if (!header) {
+                    return AWS_OP_ERR;
+                }
+
+                /* Store the name in scratch. We don't just keep a pointer to it because it could be
+                 * evicted from the dynamic table later, when we save the literal. */
+                if (aws_byte_buf_append_dynamic(&decoder->progress_entry.scratch, &header->name)) {
+                    return AWS_OP_ERR;
+                }
+
+                /* Move on to decoding header-value.
+                 * Value will also decode into the scratch, so save where name ends. */
+                literal->name_length = header->name.len;
+                decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_VALUE_STRING;
+            } break;
+
+            /* We only end up in this state if header-name is encoded as string. */
+            case HPACK_ENTRY_STATE_LITERAL_NAME_STRING: {
+                bool string_complete = false;
+                if (aws_hpack_decode_string(decoder, to_decode, &decoder->progress_entry.scratch, &string_complete)) {
+                    return AWS_OP_ERR;
+                }
+
+                if (!string_complete) {
+                    break;
+                }
+
+                /* Done decoding name string! Move on to decoding the value string.
+                 * Value will also decode into the scratch, so save where name ends. */
+                decoder->progress_entry.u.literal.name_length = decoder->progress_entry.scratch.len;
+                decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_VALUE_STRING;
+            } break;
+
+            /* Final state for "literal" entries.
+             * Decode the header-value string, then deliver the results. */
+            case HPACK_ENTRY_STATE_LITERAL_VALUE_STRING: {
+                bool string_complete = false;
+                if (aws_hpack_decode_string(decoder, to_decode, &decoder->progress_entry.scratch, &string_complete)) {
+                    return AWS_OP_ERR;
+                }
+
+                if (!string_complete) {
+                    break;
+                }
+
+                /* Done decoding value string. Done decoding entry. */
+                struct hpack_progress_literal *literal = &decoder->progress_entry.u.literal;
+
+                /* Set up a header with name and value (which are packed one after the other in scratch) */
+                struct aws_http_header header;
+                header.value = aws_byte_cursor_from_buf(&decoder->progress_entry.scratch);
+                header.name = aws_byte_cursor_advance(&header.value, literal->name_length);
+                header.compression = literal->compression;
+
+                /* Save to table if necessary */
+                if (literal->compression == AWS_HTTP_HEADER_COMPRESSION_USE_CACHE) {
+                    if (aws_hpack_insert_header(&decoder->context, &header)) {
+                        return AWS_OP_ERR;
+                    }
+                }
+
+                result->type = AWS_HPACK_DECODE_T_HEADER_FIELD;
+                result->data.header_field = header;
+                goto handle_complete;
+            } break;
+
+            /* RFC-7541 6.3. Dynamic Table Size Update
+             * Read one integer, which is the new maximum size for the dynamic table. */
+            case HPACK_ENTRY_STATE_DYNAMIC_TABLE_RESIZE: {
+                uint64_t *size64 = &decoder->progress_entry.u.dynamic_table_resize.size;
+                bool size_complete = false;
+                if (aws_hpack_decode_integer(decoder, to_decode, 5, size64, &size_complete)) {
+                    return AWS_OP_ERR;
+                }
+
+                if (!size_complete) {
+                    break;
+                }
+                /* The new maximum size MUST be lower than or equal to the limit determined by the protocol using HPACK.
+                 * A value that exceeds this limit MUST be treated as a decoding error. */
+                if (*size64 > decoder->dynamic_table_protocol_max_size_setting) {
+                    /* NOTE(review): "protocal" typo below is in a runtime log string; left as-is. */
+                    HPACK_LOG(ERROR, decoder, "Dynamic table update size is larger than the protocal setting");
+                    return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+                }
+                size_t size = (size_t)*size64;
+
+                HPACK_LOGF(TRACE, decoder, "Dynamic table size update %zu", size);
+                if (aws_hpack_resize_dynamic_table(&decoder->context, size)) {
+                    return AWS_OP_ERR;
+                }
+
+                result->type = AWS_HPACK_DECODE_T_DYNAMIC_TABLE_RESIZE;
+                result->data.dynamic_table_resize = size;
+                goto handle_complete;
+            } break;
+
+            default: {
+                AWS_ASSERT(0 && "invalid state");
+            } break;
+        }
+    }
+
+    AWS_ASSERT(to_decode->len == 0);
+    result->type = AWS_HPACK_DECODE_T_ONGOING;
+    return AWS_OP_SUCCESS;
+
+handle_complete:
+    AWS_ASSERT(result->type != AWS_HPACK_DECODE_T_ONGOING);
+    decoder->progress_entry.state = HPACK_ENTRY_STATE_INIT;
+    return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c b/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
new file mode 100644
index 00000000000..6d792c14c51
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
@@ -0,0 +1,418 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/hpack.h>
+
+/* Logging helpers: tag every HPACK encoder log line with the owner's log_id. */
+#define HPACK_LOGF(level, encoder, text, ...)                                                                          \
+    AWS_LOGF_##level(AWS_LS_HTTP_ENCODER, "id=%p [HPACK]: " text, (encoder)->log_id, __VA_ARGS__)
+#define HPACK_LOG(level, encoder, text) HPACK_LOGF(level, encoder, "%s", text)
+
+/* Shared static Huffman code table; defined elsewhere in this library. */
+struct aws_huffman_symbol_coder *hpack_get_coder(void);
+
+/* Zero-initialize an encoder, wire up its Huffman encoder and HPACK context.
+ * The size-update trackers start at SIZE_MAX with pending=false, meaning no
+ * Dynamic Table Size Update needs to be emitted yet. log_id only tags logs. */
+void aws_hpack_encoder_init(struct aws_hpack_encoder *encoder, struct aws_allocator *allocator, const void *log_id) {
+
+    AWS_ZERO_STRUCT(*encoder);
+    encoder->log_id = log_id;
+
+    aws_huffman_encoder_init(&encoder->huffman_encoder, hpack_get_coder());
+
+    aws_hpack_context_init(&encoder->context, allocator, AWS_LS_HTTP_ENCODER, log_id);
+
+    encoder->dynamic_table_size_update.pending = false;
+    encoder->dynamic_table_size_update.latest_value = SIZE_MAX;
+    encoder->dynamic_table_size_update.smallest_value = SIZE_MAX;
+}
+
+/* Release the encoder's HPACK context, then wipe the struct. */
+void aws_hpack_encoder_clean_up(struct aws_hpack_encoder *encoder) {
+    aws_hpack_context_clean_up(&encoder->context);
+    AWS_ZERO_STRUCT(*encoder);
+}
+
+/* Select how strings are encoded: never Huffman, always Huffman, or
+ * whichever representation is smaller (consumed by aws_hpack_encode_string). */
+void aws_hpack_encoder_set_huffman_mode(struct aws_hpack_encoder *encoder, enum aws_hpack_huffman_mode mode) {
+    encoder->huffman_mode = mode;
+}
+
+/* Record a peer-requested dynamic-table size change. The encoder tracks both
+ * the most recent value and the smallest value seen since the last flush, and
+ * marks that Dynamic Table Size Update entries must be emitted. */
+void aws_hpack_encoder_update_max_table_size(struct aws_hpack_encoder *encoder, uint32_t new_max_size) {
+
+    /* Setting the flag unconditionally is equivalent to guarding it — it is idempotent. */
+    encoder->dynamic_table_size_update.pending = true;
+
+    /* Keep the minimum of all sizes requested since the last emitted update. */
+    encoder->dynamic_table_size_update.smallest_value =
+        aws_min_size(new_max_size, encoder->dynamic_table_size_update.smallest_value);
+
+    /* TODO: don't necessarily go as high as possible. The peer said the encoder's
+     * dynamic table COULD get this big, but it's not required to.
+     * It's probably not a good idea to let the peer decide how much memory we allocate.
+     * Not sure how to cap it though... Use a hardcoded number?
+     * Match whatever SETTINGS_HEADER_TABLE_SIZE this side sends? */
+    encoder->dynamic_table_size_update.latest_value = new_max_size;
+}
+
+/* Return a byte with the N right-most bits set.
+ * Ex: 2 -> 00000011 */
+static uint8_t s_masked_right_bits_u8(uint8_t num_masked_bits) {
+    AWS_ASSERT(num_masked_bits <= 8);
+    /* (1 << N) - 1 sets exactly the N low bits; the arithmetic happens in
+     * unsigned int, so N == 8 is well-defined and yields 0xFF, N == 0 yields 0. */
+    return (uint8_t)((1U << num_masked_bits) - 1U);
+}
+
+/* Guarantee that output has at least required_space bytes of unused capacity,
+ * growing the buffer if needed. Growth prefers doubling the current capacity,
+ * falling back to the exact required size when doubling is insufficient.
+ * Returns AWS_OP_SUCCESS or AWS_OP_ERR (overflow or allocation failure). */
+static int s_ensure_space(struct aws_byte_buf *output, size_t required_space) {
+    const size_t remaining = output->capacity - output->len;
+    if (remaining >= required_space) {
+        return AWS_OP_SUCCESS;
+    }
+
+    /* Smallest capacity that can hold existing contents plus the new data */
+    size_t needed_capacity;
+    if (aws_add_size_checked(output->len, required_space, &needed_capacity)) {
+        return AWS_OP_ERR;
+    }
+
+    /* Doubling amortizes repeated growth; saturating add avoids overflow */
+    const size_t doubled_capacity = aws_add_size_saturating(output->capacity, output->capacity);
+    return aws_byte_buf_reserve(output, aws_max_size(needed_capacity, doubled_capacity));
+}
+
+/* Encode an HPACK prefixed integer (RFC-7541 5.1) into output, growing it as
+ * needed. starting_bits carries the entry-type bit pattern for the first byte
+ * and must not overlap the prefix bits. On error output->len is restored. */
+int aws_hpack_encode_integer(
+    uint64_t integer,
+    uint8_t starting_bits,
+    uint8_t prefix_size,
+    struct aws_byte_buf *output) {
+    AWS_ASSERT(prefix_size <= 8);
+
+    const uint8_t prefix_mask = s_masked_right_bits_u8(prefix_size);
+    AWS_ASSERT((starting_bits & prefix_mask) == 0);
+
+    const size_t original_len = output->len;
+
+    if (integer < prefix_mask) {
+        /* If the integer fits inside the specified number of bits but won't be all 1's, just write it */
+
+        /* Just write out the bits we care about */
+        uint8_t first_byte = starting_bits | (uint8_t)integer;
+        if (aws_byte_buf_append_byte_dynamic(output, first_byte)) {
+            goto error;
+        }
+    } else {
+        /* Set all of the bits in the first octet to 1 */
+        uint8_t first_byte = starting_bits | prefix_mask;
+        if (aws_byte_buf_append_byte_dynamic(output, first_byte)) {
+            goto error;
+        }
+
+        integer -= prefix_mask;
+
+        /* Mask of everything above the low 7 bits: non-zero means more octets follow */
+        const uint64_t hi_57bit_mask = UINT64_MAX - (UINT8_MAX >> 1);
+
+        do {
+            /* Take top 7 bits from the integer */
+            uint8_t this_octet = integer % 128;
+            if (integer & hi_57bit_mask) {
+                /* If there's more after this octet, set the hi bit */
+                this_octet += 128;
+            }
+
+            if (aws_byte_buf_append_byte_dynamic(output, this_octet)) {
+                goto error;
+            }
+
+            /* Remove the written bits */
+            integer >>= 7;
+        } while (integer);
+    }
+
+    return AWS_OP_SUCCESS;
+error:
+    /* Roll back any partially-written bytes */
+    output->len = original_len;
+    return AWS_OP_ERR;
+}
+
+/* Encode an HPACK string literal (RFC-7541 5.2) into output: length-prefixed,
+ * optionally Huffman-coded per encoder->huffman_mode (SMALLEST picks whichever
+ * representation is shorter). On error output->len is restored and the Huffman
+ * encoder is reset so a retry starts clean. */
+int aws_hpack_encode_string(
+    struct aws_hpack_encoder *encoder,
+    struct aws_byte_cursor to_encode,
+    struct aws_byte_buf *output) {
+
+    AWS_PRECONDITION(encoder);
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&to_encode));
+    AWS_PRECONDITION(output);
+
+    const size_t original_len = output->len;
+
+    /* Determine length of encoded string (and whether or not to use huffman) */
+    uint8_t use_huffman;
+    size_t str_length;
+    switch (encoder->huffman_mode) {
+        case AWS_HPACK_HUFFMAN_NEVER:
+            use_huffman = 0;
+            str_length = to_encode.len;
+            break;
+
+        case AWS_HPACK_HUFFMAN_ALWAYS:
+            use_huffman = 1;
+            str_length = aws_huffman_get_encoded_length(&encoder->huffman_encoder, to_encode);
+            break;
+
+        case AWS_HPACK_HUFFMAN_SMALLEST:
+            str_length = aws_huffman_get_encoded_length(&encoder->huffman_encoder, to_encode);
+            if (str_length < to_encode.len) {
+                use_huffman = 1;
+            } else {
+                str_length = to_encode.len;
+                use_huffman = 0;
+            }
+            break;
+
+        default:
+            aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+            goto error;
+    }
+
+    /*
+     * String literals are encoded like so (RFC-7541 5.2):
+     * H is whether or not data is huffman-encoded.
+     *
+     *   0   1   2   3   4   5   6   7
+     * +---+---+---+---+---+---+---+---+
+     * | H |    String Length (7+)     |
+     * +---+---------------------------+
+     * |  String Data (Length octets)  |
+     * +-------------------------------+
+     */
+
+    /* Encode string length */
+    uint8_t starting_bits = use_huffman << 7;
+    if (aws_hpack_encode_integer(str_length, starting_bits, 7, output)) {
+        HPACK_LOGF(ERROR, encoder, "Error encoding HPACK integer: %s", aws_error_name(aws_last_error()));
+        goto error;
+    }
+
+    /* Encode string data */
+    if (str_length > 0) {
+        if (use_huffman) {
+            /* Huffman encoder doesn't grow buffer, so we ensure it's big enough here */
+            if (s_ensure_space(output, str_length)) {
+                goto error;
+            }
+
+            if (aws_huffman_encode(&encoder->huffman_encoder, &to_encode, output)) {
+                HPACK_LOGF(ERROR, encoder, "Error from Huffman encoder: %s", aws_error_name(aws_last_error()));
+                goto error;
+            }
+
+        } else {
+            if (aws_byte_buf_append_dynamic(output, &to_encode)) {
+                goto error;
+            }
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+
+error:
+    /* Roll back partial output and reset Huffman state for the next attempt */
+    output->len = original_len;
+    aws_huffman_encoder_reset(&encoder->huffman_encoder);
+    return AWS_OP_ERR;
+}
+
+/* All types that HPACK might encode/decode (RFC-7541 6 - Binary Format).
+ * Used to index the bit-pattern and prefix-size tables below;
+ * AWS_HPACK_ENTRY_TYPE_COUNT sizes those tables. */
+enum aws_hpack_entry_type {
+    AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD,                           /* RFC-7541 6.1 */
+    AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING, /* RFC-7541 6.2.1 */
+    AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING,          /* RFC-7541 6.2.2 */
+    AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED,             /* RFC-7541 6.2.3 */
+    AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE,                           /* RFC-7541 6.3 */
+    AWS_HPACK_ENTRY_TYPE_COUNT,
+};
+
+/**
+ * First byte each entry type looks like this (RFC-7541 6):
+ * The "xxxxx" part is the "N-bit prefix" of the entry's first encoded integer.
+ *
+ * 1xxxxxxx: Indexed Header Field Representation
+ * 01xxxxxx: Literal Header Field with Incremental Indexing
+ * 001xxxxx: Dynamic Table Size Update
+ * 0001xxxx: Literal Header Field Never Indexed
+ * 0000xxxx: Literal Header Field without Indexing
+ */
+/* Starting bit pattern OR'd into the first byte by aws_hpack_encode_integer. */
+static const uint8_t s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_TYPE_COUNT] = {
+    [AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD] = 1 << 7,
+    [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING] = 1 << 6,
+    [AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE] = 1 << 5,
+    [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED] = 1 << 4,
+    [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING] = 0 << 4,
+};
+
+/* Width of the N-bit integer prefix for each entry type (RFC-7541 6);
+ * complements s_hpack_entry_starting_bit_pattern above. */
+static const uint8_t s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_TYPE_COUNT] = {
+    [AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD] = 7,
+    [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING] = 6,
+    [AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE] = 5,
+    [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED] = 4,
+    [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING] = 4,
+};
+
+/* Map a public aws_http_header_compression hint to the corresponding literal
+ * HPACK entry type (RFC-7541 6.2.1-6.2.3).
+ * Raises AWS_ERROR_INVALID_ARGUMENT for unrecognized values. */
+static int s_convert_http_compression_to_literal_entry_type(
+    enum aws_http_header_compression compression,
+    enum aws_hpack_entry_type *out_entry_type) {
+
+    switch (compression) {
+        case AWS_HTTP_HEADER_COMPRESSION_USE_CACHE:
+            *out_entry_type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING;
+            break;
+        case AWS_HTTP_HEADER_COMPRESSION_NO_CACHE:
+            *out_entry_type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING;
+            break;
+        case AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE:
+            *out_entry_type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED;
+            break;
+        default:
+            return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Encode a single header-field into `output` (RFC 7541 section 6).
+ * Representation is chosen by table lookup plus the header's compression
+ * setting: a full name+value table hit (when caching is allowed) emits an
+ * Indexed Header Field; otherwise a literal entry is emitted, reusing an
+ * indexed name when one exists. On any failure, `output` is rolled back to
+ * its original length and AWS_OP_ERR is returned (error code already raised
+ * by the failing helper). */
+static int s_encode_header_field(
+    struct aws_hpack_encoder *encoder,
+    const struct aws_http_header *header,
+    struct aws_byte_buf *output) {
+
+    AWS_PRECONDITION(encoder);
+    AWS_PRECONDITION(header);
+    AWS_PRECONDITION(output);
+
+    /* Remember starting length so partial writes can be undone on error. */
+    size_t original_len = output->len;
+
+    /* Search for header-field in tables */
+    bool found_indexed_value;
+    size_t header_index = aws_hpack_find_index(&encoder->context, header, true, &found_indexed_value);
+
+    if (header->compression != AWS_HTTP_HEADER_COMPRESSION_USE_CACHE) {
+        /* If user doesn't want to use indexed value, then don't use it */
+        found_indexed_value = false;
+    }
+
+    if (header_index && found_indexed_value) {
+        /* Indexed header field */
+        const enum aws_hpack_entry_type entry_type = AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD;
+
+        /* encode the one index (along with the entry type), and we're done! */
+        uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[entry_type];
+        uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[entry_type];
+        if (aws_hpack_encode_integer(header_index, starting_bit_pattern, num_prefix_bits, output)) {
+            goto error;
+        }
+
+        return AWS_OP_SUCCESS;
+    }
+
+    /* Else, Literal header field... */
+
+    /* determine exactly which type of literal header-field to encode. */
+    enum aws_hpack_entry_type literal_entry_type = AWS_HPACK_ENTRY_TYPE_COUNT;
+    if (s_convert_http_compression_to_literal_entry_type(header->compression, &literal_entry_type)) {
+        goto error;
+    }
+
+    /* the entry type makes up the first few bits of the next integer we encode */
+    uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[literal_entry_type];
+    uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[literal_entry_type];
+
+    if (header_index) {
+        /* Literal header field, indexed name */
+
+        /* first encode the index of name */
+        if (aws_hpack_encode_integer(header_index, starting_bit_pattern, num_prefix_bits, output)) {
+            goto error;
+        }
+    } else {
+        /* Literal header field, new name */
+
+        /* first encode index of 0 to indicate that header-name is not indexed */
+        if (aws_hpack_encode_integer(0, starting_bit_pattern, num_prefix_bits, output)) {
+            goto error;
+        }
+
+        /* next encode header-name string */
+        if (aws_hpack_encode_string(encoder, header->name, output)) {
+            goto error;
+        }
+    }
+
+    /* then encode header-value string, and we're done encoding! */
+    if (aws_hpack_encode_string(encoder, header->value, output)) {
+        goto error;
+    }
+
+    /* if "incremental indexing" type, insert header into the dynamic table. */
+    if (AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING == literal_entry_type) {
+        if (aws_hpack_insert_header(&encoder->context, header)) {
+            goto error;
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+error:
+    /* Undo any bytes this field wrote; caller's earlier output is preserved. */
+    output->len = original_len;
+    return AWS_OP_ERR;
+}
+
+/* Encode a complete header-block into `output` (RFC 7541).
+ * If a dynamic-table size change is pending, Dynamic Table Size Update
+ * entries are emitted first (RFC 7541 section 4.2): when the size dipped to a
+ * smaller value before settling on the latest one, the smallest intermediate
+ * value must also be signaled, so up to two update entries are written, each
+ * paired with an actual resize of the local table.
+ * NOTE(review): on failure, `output` may retain fields encoded before the
+ * failing one (each field only rolls back its own bytes); caller is presumed
+ * to discard the buffer - confirm. */
+int aws_hpack_encode_header_block(
+    struct aws_hpack_encoder *encoder,
+    const struct aws_http_headers *headers,
+    struct aws_byte_buf *output) {
+
+    /* Encode a dynamic table size update at the beginning of the first header-block
+     * following the change to the dynamic table size RFC-7541 4.2 */
+    if (encoder->dynamic_table_size_update.pending) {
+        if (encoder->dynamic_table_size_update.smallest_value != encoder->dynamic_table_size_update.latest_value) {
+            /* Size shrank below the final value at some point: signal the low-water mark first. */
+            size_t smallest_update_value = encoder->dynamic_table_size_update.smallest_value;
+            HPACK_LOGF(
+                TRACE, encoder, "Encoding smallest dynamic table size update entry size: %zu", smallest_update_value);
+            if (aws_hpack_resize_dynamic_table(&encoder->context, smallest_update_value)) {
+                HPACK_LOGF(ERROR, encoder, "Dynamic table resize failed, size: %zu", smallest_update_value);
+                return AWS_OP_ERR;
+            }
+            uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE];
+            uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE];
+            if (aws_hpack_encode_integer(smallest_update_value, starting_bit_pattern, num_prefix_bits, output)) {
+                HPACK_LOGF(
+                    ERROR,
+                    encoder,
+                    "Integer encoding failed for table size update entry, integer: %zu",
+                    smallest_update_value);
+                return AWS_OP_ERR;
+            }
+        }
+        /* Always signal (and apply) the latest size. */
+        size_t last_update_value = encoder->dynamic_table_size_update.latest_value;
+        HPACK_LOGF(TRACE, encoder, "Encoding last dynamic table size update entry size: %zu", last_update_value);
+        if (aws_hpack_resize_dynamic_table(&encoder->context, last_update_value)) {
+            HPACK_LOGF(ERROR, encoder, "Dynamic table resize failed, size: %zu", last_update_value);
+            return AWS_OP_ERR;
+        }
+        uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE];
+        uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE];
+        if (aws_hpack_encode_integer(last_update_value, starting_bit_pattern, num_prefix_bits, output)) {
+            HPACK_LOGF(
+                ERROR, encoder, "Integer encoding failed for table size update entry, integer: %zu", last_update_value);
+            return AWS_OP_ERR;
+        }
+
+        /* Reset the pending-update tracking; SIZE_MAX marks "no value recorded". */
+        encoder->dynamic_table_size_update.pending = false;
+        encoder->dynamic_table_size_update.latest_value = SIZE_MAX;
+        encoder->dynamic_table_size_update.smallest_value = SIZE_MAX;
+    }
+
+    const size_t num_headers = aws_http_headers_count(headers);
+    for (size_t i = 0; i < num_headers; ++i) {
+        struct aws_http_header header;
+        /* i < num_headers, so this in-bounds lookup is presumed infallible;
+         * return value intentionally ignored. */
+        aws_http_headers_get_index(headers, i, &header);
+        if (s_encode_header_field(encoder, &header, output)) {
+            return AWS_OP_ERR;
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c b/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
new file mode 100644
index 00000000000..4c832f6a7ca
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
@@ -0,0 +1,2337 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/* WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT EDIT. */
+/* clang-format off */
+
+#include <aws/compression/huffman.h>
+
+/* Static Huffman code table for HPACK (RFC 7541 Appendix B): one entry per
+ * byte value 0-255, giving each code's right-aligned bit pattern and length.
+ * Generated table - do not hand-edit values. */
+static struct aws_huffman_code code_points[] = {
+    { .pattern = 0x1ff8, .num_bits = 13 }, /* ' ' 0 */
+    { .pattern = 0x7fffd8, .num_bits = 23 }, /* ' ' 1 */
+    { .pattern = 0xfffffe2, .num_bits = 28 }, /* ' ' 2 */
+    { .pattern = 0xfffffe3, .num_bits = 28 }, /* ' ' 3 */
+    { .pattern = 0xfffffe4, .num_bits = 28 }, /* ' ' 4 */
+    { .pattern = 0xfffffe5, .num_bits = 28 }, /* ' ' 5 */
+    { .pattern = 0xfffffe6, .num_bits = 28 }, /* ' ' 6 */
+    { .pattern = 0xfffffe7, .num_bits = 28 }, /* ' ' 7 */
+    { .pattern = 0xfffffe8, .num_bits = 28 }, /* ' ' 8 */
+    { .pattern = 0xffffea, .num_bits = 24 }, /* ' ' 9 */
+    { .pattern = 0x3ffffffc, .num_bits = 30 }, /* ' ' 10 */
+    { .pattern = 0xfffffe9, .num_bits = 28 }, /* ' ' 11 */
+    { .pattern = 0xfffffea, .num_bits = 28 }, /* ' ' 12 */
+    { .pattern = 0x3ffffffd, .num_bits = 30 }, /* ' ' 13 */
+    { .pattern = 0xfffffeb, .num_bits = 28 }, /* ' ' 14 */
+    { .pattern = 0xfffffec, .num_bits = 28 }, /* ' ' 15 */
+    { .pattern = 0xfffffed, .num_bits = 28 }, /* ' ' 16 */
+    { .pattern = 0xfffffee, .num_bits = 28 }, /* ' ' 17 */
+    { .pattern = 0xfffffef, .num_bits = 28 }, /* ' ' 18 */
+    { .pattern = 0xffffff0, .num_bits = 28 }, /* ' ' 19 */
+    { .pattern = 0xffffff1, .num_bits = 28 }, /* ' ' 20 */
+    { .pattern = 0xffffff2, .num_bits = 28 }, /* ' ' 21 */
+    { .pattern = 0x3ffffffe, .num_bits = 30 }, /* ' ' 22 */
+    { .pattern = 0xffffff3, .num_bits = 28 }, /* ' ' 23 */
+    { .pattern = 0xffffff4, .num_bits = 28 }, /* ' ' 24 */
+    { .pattern = 0xffffff5, .num_bits = 28 }, /* ' ' 25 */
+    { .pattern = 0xffffff6, .num_bits = 28 }, /* ' ' 26 */
+    { .pattern = 0xffffff7, .num_bits = 28 }, /* ' ' 27 */
+    { .pattern = 0xffffff8, .num_bits = 28 }, /* ' ' 28 */
+    { .pattern = 0xffffff9, .num_bits = 28 }, /* ' ' 29 */
+    { .pattern = 0xffffffa, .num_bits = 28 }, /* ' ' 30 */
+    { .pattern = 0xffffffb, .num_bits = 28 }, /* ' ' 31 */
+    { .pattern = 0x14, .num_bits = 6 }, /* ' ' 32 */
+    { .pattern = 0x3f8, .num_bits = 10 }, /* '!' 33 */
+    { .pattern = 0x3f9, .num_bits = 10 }, /* '"' 34 */
+    { .pattern = 0xffa, .num_bits = 12 }, /* '#' 35 */
+    { .pattern = 0x1ff9, .num_bits = 13 }, /* '$' 36 */
+    { .pattern = 0x15, .num_bits = 6 }, /* '%' 37 */
+    { .pattern = 0xf8, .num_bits = 8 }, /* '&' 38 */
+    { .pattern = 0x7fa, .num_bits = 11 }, /* ''' 39 */
+    { .pattern = 0x3fa, .num_bits = 10 }, /* '(' 40 */
+    { .pattern = 0x3fb, .num_bits = 10 }, /* ')' 41 */
+    { .pattern = 0xf9, .num_bits = 8 }, /* '*' 42 */
+    { .pattern = 0x7fb, .num_bits = 11 }, /* '+' 43 */
+    { .pattern = 0xfa, .num_bits = 8 }, /* ',' 44 */
+    { .pattern = 0x16, .num_bits = 6 }, /* '-' 45 */
+    { .pattern = 0x17, .num_bits = 6 }, /* '.' 46 */
+    { .pattern = 0x18, .num_bits = 6 }, /* '/' 47 */
+    { .pattern = 0x0, .num_bits = 5 }, /* '0' 48 */
+    { .pattern = 0x1, .num_bits = 5 }, /* '1' 49 */
+    { .pattern = 0x2, .num_bits = 5 }, /* '2' 50 */
+    { .pattern = 0x19, .num_bits = 6 }, /* '3' 51 */
+    { .pattern = 0x1a, .num_bits = 6 }, /* '4' 52 */
+    { .pattern = 0x1b, .num_bits = 6 }, /* '5' 53 */
+    { .pattern = 0x1c, .num_bits = 6 }, /* '6' 54 */
+    { .pattern = 0x1d, .num_bits = 6 }, /* '7' 55 */
+    { .pattern = 0x1e, .num_bits = 6 }, /* '8' 56 */
+    { .pattern = 0x1f, .num_bits = 6 }, /* '9' 57 */
+    { .pattern = 0x5c, .num_bits = 7 }, /* ':' 58 */
+    { .pattern = 0xfb, .num_bits = 8 }, /* ';' 59 */
+    { .pattern = 0x7ffc, .num_bits = 15 }, /* '<' 60 */
+    { .pattern = 0x20, .num_bits = 6 }, /* '=' 61 */
+    { .pattern = 0xffb, .num_bits = 12 }, /* '>' 62 */
+    { .pattern = 0x3fc, .num_bits = 10 }, /* '?' 63 */
+    { .pattern = 0x1ffa, .num_bits = 13 }, /* '@' 64 */
+    { .pattern = 0x21, .num_bits = 6 }, /* 'A' 65 */
+    { .pattern = 0x5d, .num_bits = 7 }, /* 'B' 66 */
+    { .pattern = 0x5e, .num_bits = 7 }, /* 'C' 67 */
+    { .pattern = 0x5f, .num_bits = 7 }, /* 'D' 68 */
+    { .pattern = 0x60, .num_bits = 7 }, /* 'E' 69 */
+    { .pattern = 0x61, .num_bits = 7 }, /* 'F' 70 */
+    { .pattern = 0x62, .num_bits = 7 }, /* 'G' 71 */
+    { .pattern = 0x63, .num_bits = 7 }, /* 'H' 72 */
+    { .pattern = 0x64, .num_bits = 7 }, /* 'I' 73 */
+    { .pattern = 0x65, .num_bits = 7 }, /* 'J' 74 */
+    { .pattern = 0x66, .num_bits = 7 }, /* 'K' 75 */
+    { .pattern = 0x67, .num_bits = 7 }, /* 'L' 76 */
+    { .pattern = 0x68, .num_bits = 7 }, /* 'M' 77 */
+    { .pattern = 0x69, .num_bits = 7 }, /* 'N' 78 */
+    { .pattern = 0x6a, .num_bits = 7 }, /* 'O' 79 */
+    { .pattern = 0x6b, .num_bits = 7 }, /* 'P' 80 */
+    { .pattern = 0x6c, .num_bits = 7 }, /* 'Q' 81 */
+    { .pattern = 0x6d, .num_bits = 7 }, /* 'R' 82 */
+    { .pattern = 0x6e, .num_bits = 7 }, /* 'S' 83 */
+    { .pattern = 0x6f, .num_bits = 7 }, /* 'T' 84 */
+    { .pattern = 0x70, .num_bits = 7 }, /* 'U' 85 */
+    { .pattern = 0x71, .num_bits = 7 }, /* 'V' 86 */
+    { .pattern = 0x72, .num_bits = 7 }, /* 'W' 87 */
+    { .pattern = 0xfc, .num_bits = 8 }, /* 'X' 88 */
+    { .pattern = 0x73, .num_bits = 7 }, /* 'Y' 89 */
+    { .pattern = 0xfd, .num_bits = 8 }, /* 'Z' 90 */
+    { .pattern = 0x1ffb, .num_bits = 13 }, /* '[' 91 */
+    { .pattern = 0x7fff0, .num_bits = 19 }, /* '\' 92 */
+    { .pattern = 0x1ffc, .num_bits = 13 }, /* ']' 93 */
+    { .pattern = 0x3ffc, .num_bits = 14 }, /* '^' 94 */
+    { .pattern = 0x22, .num_bits = 6 }, /* '_' 95 */
+    { .pattern = 0x7ffd, .num_bits = 15 }, /* '`' 96 */
+    { .pattern = 0x3, .num_bits = 5 }, /* 'a' 97 */
+    { .pattern = 0x23, .num_bits = 6 }, /* 'b' 98 */
+    { .pattern = 0x4, .num_bits = 5 }, /* 'c' 99 */
+    { .pattern = 0x24, .num_bits = 6 }, /* 'd' 100 */
+    { .pattern = 0x5, .num_bits = 5 }, /* 'e' 101 */
+    { .pattern = 0x25, .num_bits = 6 }, /* 'f' 102 */
+    { .pattern = 0x26, .num_bits = 6 }, /* 'g' 103 */
+    { .pattern = 0x27, .num_bits = 6 }, /* 'h' 104 */
+    { .pattern = 0x6, .num_bits = 5 }, /* 'i' 105 */
+    { .pattern = 0x74, .num_bits = 7 }, /* 'j' 106 */
+    { .pattern = 0x75, .num_bits = 7 }, /* 'k' 107 */
+    { .pattern = 0x28, .num_bits = 6 }, /* 'l' 108 */
+    { .pattern = 0x29, .num_bits = 6 }, /* 'm' 109 */
+    { .pattern = 0x2a, .num_bits = 6 }, /* 'n' 110 */
+    { .pattern = 0x7, .num_bits = 5 }, /* 'o' 111 */
+    { .pattern = 0x2b, .num_bits = 6 }, /* 'p' 112 */
+    { .pattern = 0x76, .num_bits = 7 }, /* 'q' 113 */
+    { .pattern = 0x2c, .num_bits = 6 }, /* 'r' 114 */
+    { .pattern = 0x8, .num_bits = 5 }, /* 's' 115 */
+    { .pattern = 0x9, .num_bits = 5 }, /* 't' 116 */
+    { .pattern = 0x2d, .num_bits = 6 }, /* 'u' 117 */
+    { .pattern = 0x77, .num_bits = 7 }, /* 'v' 118 */
+    { .pattern = 0x78, .num_bits = 7 }, /* 'w' 119 */
+    { .pattern = 0x79, .num_bits = 7 }, /* 'x' 120 */
+    { .pattern = 0x7a, .num_bits = 7 }, /* 'y' 121 */
+    { .pattern = 0x7b, .num_bits = 7 }, /* 'z' 122 */
+    { .pattern = 0x7ffe, .num_bits = 15 }, /* '{' 123 */
+    { .pattern = 0x7fc, .num_bits = 11 }, /* '|' 124 */
+    { .pattern = 0x3ffd, .num_bits = 14 }, /* '}' 125 */
+    { .pattern = 0x1ffd, .num_bits = 13 }, /* '~' 126 */
+    { .pattern = 0xffffffc, .num_bits = 28 }, /* ' ' 127 */
+    { .pattern = 0xfffe6, .num_bits = 20 }, /* ' ' 128 */
+    { .pattern = 0x3fffd2, .num_bits = 22 }, /* ' ' 129 */
+    { .pattern = 0xfffe7, .num_bits = 20 }, /* ' ' 130 */
+    { .pattern = 0xfffe8, .num_bits = 20 }, /* ' ' 131 */
+    { .pattern = 0x3fffd3, .num_bits = 22 }, /* ' ' 132 */
+    { .pattern = 0x3fffd4, .num_bits = 22 }, /* ' ' 133 */
+    { .pattern = 0x3fffd5, .num_bits = 22 }, /* ' ' 134 */
+    { .pattern = 0x7fffd9, .num_bits = 23 }, /* ' ' 135 */
+    { .pattern = 0x3fffd6, .num_bits = 22 }, /* ' ' 136 */
+    { .pattern = 0x7fffda, .num_bits = 23 }, /* ' ' 137 */
+    { .pattern = 0x7fffdb, .num_bits = 23 }, /* ' ' 138 */
+    { .pattern = 0x7fffdc, .num_bits = 23 }, /* ' ' 139 */
+    { .pattern = 0x7fffdd, .num_bits = 23 }, /* ' ' 140 */
+    { .pattern = 0x7fffde, .num_bits = 23 }, /* ' ' 141 */
+    { .pattern = 0xffffeb, .num_bits = 24 }, /* ' ' 142 */
+    { .pattern = 0x7fffdf, .num_bits = 23 }, /* ' ' 143 */
+    { .pattern = 0xffffec, .num_bits = 24 }, /* ' ' 144 */
+    { .pattern = 0xffffed, .num_bits = 24 }, /* ' ' 145 */
+    { .pattern = 0x3fffd7, .num_bits = 22 }, /* ' ' 146 */
+    { .pattern = 0x7fffe0, .num_bits = 23 }, /* ' ' 147 */
+    { .pattern = 0xffffee, .num_bits = 24 }, /* ' ' 148 */
+    { .pattern = 0x7fffe1, .num_bits = 23 }, /* ' ' 149 */
+    { .pattern = 0x7fffe2, .num_bits = 23 }, /* ' ' 150 */
+    { .pattern = 0x7fffe3, .num_bits = 23 }, /* ' ' 151 */
+    { .pattern = 0x7fffe4, .num_bits = 23 }, /* ' ' 152 */
+    { .pattern = 0x1fffdc, .num_bits = 21 }, /* ' ' 153 */
+    { .pattern = 0x3fffd8, .num_bits = 22 }, /* ' ' 154 */
+    { .pattern = 0x7fffe5, .num_bits = 23 }, /* ' ' 155 */
+    { .pattern = 0x3fffd9, .num_bits = 22 }, /* ' ' 156 */
+    { .pattern = 0x7fffe6, .num_bits = 23 }, /* ' ' 157 */
+    { .pattern = 0x7fffe7, .num_bits = 23 }, /* ' ' 158 */
+    { .pattern = 0xffffef, .num_bits = 24 }, /* ' ' 159 */
+    { .pattern = 0x3fffda, .num_bits = 22 }, /* ' ' 160 */
+    { .pattern = 0x1fffdd, .num_bits = 21 }, /* ' ' 161 */
+    { .pattern = 0xfffe9, .num_bits = 20 }, /* ' ' 162 */
+    { .pattern = 0x3fffdb, .num_bits = 22 }, /* ' ' 163 */
+    { .pattern = 0x3fffdc, .num_bits = 22 }, /* ' ' 164 */
+    { .pattern = 0x7fffe8, .num_bits = 23 }, /* ' ' 165 */
+    { .pattern = 0x7fffe9, .num_bits = 23 }, /* ' ' 166 */
+    { .pattern = 0x1fffde, .num_bits = 21 }, /* ' ' 167 */
+    { .pattern = 0x7fffea, .num_bits = 23 }, /* ' ' 168 */
+    { .pattern = 0x3fffdd, .num_bits = 22 }, /* ' ' 169 */
+    { .pattern = 0x3fffde, .num_bits = 22 }, /* ' ' 170 */
+    { .pattern = 0xfffff0, .num_bits = 24 }, /* ' ' 171 */
+    { .pattern = 0x1fffdf, .num_bits = 21 }, /* ' ' 172 */
+    { .pattern = 0x3fffdf, .num_bits = 22 }, /* ' ' 173 */
+    { .pattern = 0x7fffeb, .num_bits = 23 }, /* ' ' 174 */
+    { .pattern = 0x7fffec, .num_bits = 23 }, /* ' ' 175 */
+    { .pattern = 0x1fffe0, .num_bits = 21 }, /* ' ' 176 */
+    { .pattern = 0x1fffe1, .num_bits = 21 }, /* ' ' 177 */
+    { .pattern = 0x3fffe0, .num_bits = 22 }, /* ' ' 178 */
+    { .pattern = 0x1fffe2, .num_bits = 21 }, /* ' ' 179 */
+    { .pattern = 0x7fffed, .num_bits = 23 }, /* ' ' 180 */
+    { .pattern = 0x3fffe1, .num_bits = 22 }, /* ' ' 181 */
+    { .pattern = 0x7fffee, .num_bits = 23 }, /* ' ' 182 */
+    { .pattern = 0x7fffef, .num_bits = 23 }, /* ' ' 183 */
+    { .pattern = 0xfffea, .num_bits = 20 }, /* ' ' 184 */
+    { .pattern = 0x3fffe2, .num_bits = 22 }, /* ' ' 185 */
+    { .pattern = 0x3fffe3, .num_bits = 22 }, /* ' ' 186 */
+    { .pattern = 0x3fffe4, .num_bits = 22 }, /* ' ' 187 */
+    { .pattern = 0x7ffff0, .num_bits = 23 }, /* ' ' 188 */
+    { .pattern = 0x3fffe5, .num_bits = 22 }, /* ' ' 189 */
+    { .pattern = 0x3fffe6, .num_bits = 22 }, /* ' ' 190 */
+    { .pattern = 0x7ffff1, .num_bits = 23 }, /* ' ' 191 */
+    { .pattern = 0x3ffffe0, .num_bits = 26 }, /* ' ' 192 */
+    { .pattern = 0x3ffffe1, .num_bits = 26 }, /* ' ' 193 */
+    { .pattern = 0xfffeb, .num_bits = 20 }, /* ' ' 194 */
+    { .pattern = 0x7fff1, .num_bits = 19 }, /* ' ' 195 */
+    { .pattern = 0x3fffe7, .num_bits = 22 }, /* ' ' 196 */
+    { .pattern = 0x7ffff2, .num_bits = 23 }, /* ' ' 197 */
+    { .pattern = 0x3fffe8, .num_bits = 22 }, /* ' ' 198 */
+    { .pattern = 0x1ffffec, .num_bits = 25 }, /* ' ' 199 */
+    { .pattern = 0x3ffffe2, .num_bits = 26 }, /* ' ' 200 */
+    { .pattern = 0x3ffffe3, .num_bits = 26 }, /* ' ' 201 */
+    { .pattern = 0x3ffffe4, .num_bits = 26 }, /* ' ' 202 */
+    { .pattern = 0x7ffffde, .num_bits = 27 }, /* ' ' 203 */
+    { .pattern = 0x7ffffdf, .num_bits = 27 }, /* ' ' 204 */
+    { .pattern = 0x3ffffe5, .num_bits = 26 }, /* ' ' 205 */
+    { .pattern = 0xfffff1, .num_bits = 24 }, /* ' ' 206 */
+    { .pattern = 0x1ffffed, .num_bits = 25 }, /* ' ' 207 */
+    { .pattern = 0x7fff2, .num_bits = 19 }, /* ' ' 208 */
+    { .pattern = 0x1fffe3, .num_bits = 21 }, /* ' ' 209 */
+    { .pattern = 0x3ffffe6, .num_bits = 26 }, /* ' ' 210 */
+    { .pattern = 0x7ffffe0, .num_bits = 27 }, /* ' ' 211 */
+    { .pattern = 0x7ffffe1, .num_bits = 27 }, /* ' ' 212 */
+    { .pattern = 0x3ffffe7, .num_bits = 26 }, /* ' ' 213 */
+    { .pattern = 0x7ffffe2, .num_bits = 27 }, /* ' ' 214 */
+    { .pattern = 0xfffff2, .num_bits = 24 }, /* ' ' 215 */
+    { .pattern = 0x1fffe4, .num_bits = 21 }, /* ' ' 216 */
+    { .pattern = 0x1fffe5, .num_bits = 21 }, /* ' ' 217 */
+    { .pattern = 0x3ffffe8, .num_bits = 26 }, /* ' ' 218 */
+    { .pattern = 0x3ffffe9, .num_bits = 26 }, /* ' ' 219 */
+    { .pattern = 0xffffffd, .num_bits = 28 }, /* ' ' 220 */
+    { .pattern = 0x7ffffe3, .num_bits = 27 }, /* ' ' 221 */
+    { .pattern = 0x7ffffe4, .num_bits = 27 }, /* ' ' 222 */
+    { .pattern = 0x7ffffe5, .num_bits = 27 }, /* ' ' 223 */
+    { .pattern = 0xfffec, .num_bits = 20 }, /* ' ' 224 */
+    { .pattern = 0xfffff3, .num_bits = 24 }, /* ' ' 225 */
+    { .pattern = 0xfffed, .num_bits = 20 }, /* ' ' 226 */
+    { .pattern = 0x1fffe6, .num_bits = 21 }, /* ' ' 227 */
+    { .pattern = 0x3fffe9, .num_bits = 22 }, /* ' ' 228 */
+    { .pattern = 0x1fffe7, .num_bits = 21 }, /* ' ' 229 */
+    { .pattern = 0x1fffe8, .num_bits = 21 }, /* ' ' 230 */
+    { .pattern = 0x7ffff3, .num_bits = 23 }, /* ' ' 231 */
+    { .pattern = 0x3fffea, .num_bits = 22 }, /* ' ' 232 */
+    { .pattern = 0x3fffeb, .num_bits = 22 }, /* ' ' 233 */
+    { .pattern = 0x1ffffee, .num_bits = 25 }, /* ' ' 234 */
+    { .pattern = 0x1ffffef, .num_bits = 25 }, /* ' ' 235 */
+    { .pattern = 0xfffff4, .num_bits = 24 }, /* ' ' 236 */
+    { .pattern = 0xfffff5, .num_bits = 24 }, /* ' ' 237 */
+    { .pattern = 0x3ffffea, .num_bits = 26 }, /* ' ' 238 */
+    { .pattern = 0x7ffff4, .num_bits = 23 }, /* ' ' 239 */
+    { .pattern = 0x3ffffeb, .num_bits = 26 }, /* ' ' 240 */
+    { .pattern = 0x7ffffe6, .num_bits = 27 }, /* ' ' 241 */
+    { .pattern = 0x3ffffec, .num_bits = 26 }, /* ' ' 242 */
+    { .pattern = 0x3ffffed, .num_bits = 26 }, /* ' ' 243 */
+    { .pattern = 0x7ffffe7, .num_bits = 27 }, /* ' ' 244 */
+    { .pattern = 0x7ffffe8, .num_bits = 27 }, /* ' ' 245 */
+    { .pattern = 0x7ffffe9, .num_bits = 27 }, /* ' ' 246 */
+    { .pattern = 0x7ffffea, .num_bits = 27 }, /* ' ' 247 */
+    { .pattern = 0x7ffffeb, .num_bits = 27 }, /* ' ' 248 */
+    { .pattern = 0xffffffe, .num_bits = 28 }, /* ' ' 249 */
+    { .pattern = 0x7ffffec, .num_bits = 27 }, /* ' ' 250 */
+    { .pattern = 0x7ffffed, .num_bits = 27 }, /* ' ' 251 */
+    { .pattern = 0x7ffffee, .num_bits = 27 }, /* ' ' 252 */
+    { .pattern = 0x7ffffef, .num_bits = 27 }, /* ' ' 253 */
+    { .pattern = 0x7fffff0, .num_bits = 27 }, /* ' ' 254 */
+    { .pattern = 0x3ffffee, .num_bits = 26 }, /* ' ' 255 */
+};
+
+/* Encoder callback for aws_huffman_symbol_coder: return the static HPACK
+ * Huffman code for a byte value via direct table lookup. `userdata` is
+ * unused (required by the callback signature). */
+static struct aws_huffman_code encode_symbol(uint8_t symbol, void *userdata) {
+    (void)userdata;
+
+    return code_points[symbol];
+}
+
+/* NOLINTNEXTLINE(readability-function-size) */
+static uint8_t decode_symbol(uint32_t bits, uint8_t *symbol, void *userdata) {
+ (void)userdata;
+
+ if (bits & 0x80000000) {
+ goto node_1;
+ } else {
+ goto node_0;
+ }
+
+node_0:
+ if (bits & 0x40000000) {
+ goto node_01;
+ } else {
+ goto node_00;
+ }
+
+node_00:
+ if (bits & 0x20000000) {
+ goto node_001;
+ } else {
+ goto node_000;
+ }
+
+node_000:
+ if (bits & 0x10000000) {
+ goto node_0001;
+ } else {
+ goto node_0000;
+ }
+
+node_0000:
+ if (bits & 0x8000000) {
+ *symbol = 49;
+ return 5;
+ } else {
+ *symbol = 48;
+ return 5;
+ }
+
+node_0001:
+ if (bits & 0x8000000) {
+ *symbol = 97;
+ return 5;
+ } else {
+ *symbol = 50;
+ return 5;
+ }
+
+node_001:
+ if (bits & 0x10000000) {
+ goto node_0011;
+ } else {
+ goto node_0010;
+ }
+
+node_0010:
+ if (bits & 0x8000000) {
+ *symbol = 101;
+ return 5;
+ } else {
+ *symbol = 99;
+ return 5;
+ }
+
+node_0011:
+ if (bits & 0x8000000) {
+ *symbol = 111;
+ return 5;
+ } else {
+ *symbol = 105;
+ return 5;
+ }
+
+node_01:
+ if (bits & 0x20000000) {
+ goto node_011;
+ } else {
+ goto node_010;
+ }
+
+node_010:
+ if (bits & 0x10000000) {
+ goto node_0101;
+ } else {
+ goto node_0100;
+ }
+
+node_0100:
+ if (bits & 0x8000000) {
+ *symbol = 116;
+ return 5;
+ } else {
+ *symbol = 115;
+ return 5;
+ }
+
+node_0101:
+ if (bits & 0x8000000) {
+ goto node_01011;
+ } else {
+ goto node_01010;
+ }
+
+node_01010:
+ if (bits & 0x4000000) {
+ *symbol = 37;
+ return 6;
+ } else {
+ *symbol = 32;
+ return 6;
+ }
+
+node_01011:
+ if (bits & 0x4000000) {
+ *symbol = 46;
+ return 6;
+ } else {
+ *symbol = 45;
+ return 6;
+ }
+
+node_011:
+ if (bits & 0x10000000) {
+ goto node_0111;
+ } else {
+ goto node_0110;
+ }
+
+node_0110:
+ if (bits & 0x8000000) {
+ goto node_01101;
+ } else {
+ goto node_01100;
+ }
+
+node_01100:
+ if (bits & 0x4000000) {
+ *symbol = 51;
+ return 6;
+ } else {
+ *symbol = 47;
+ return 6;
+ }
+
+node_01101:
+ if (bits & 0x4000000) {
+ *symbol = 53;
+ return 6;
+ } else {
+ *symbol = 52;
+ return 6;
+ }
+
+node_0111:
+ if (bits & 0x8000000) {
+ goto node_01111;
+ } else {
+ goto node_01110;
+ }
+
+node_01110:
+ if (bits & 0x4000000) {
+ *symbol = 55;
+ return 6;
+ } else {
+ *symbol = 54;
+ return 6;
+ }
+
+node_01111:
+ if (bits & 0x4000000) {
+ *symbol = 57;
+ return 6;
+ } else {
+ *symbol = 56;
+ return 6;
+ }
+
+node_1:
+ if (bits & 0x40000000) {
+ goto node_11;
+ } else {
+ goto node_10;
+ }
+
+node_10:
+ if (bits & 0x20000000) {
+ goto node_101;
+ } else {
+ goto node_100;
+ }
+
+node_100:
+ if (bits & 0x10000000) {
+ goto node_1001;
+ } else {
+ goto node_1000;
+ }
+
+node_1000:
+ if (bits & 0x8000000) {
+ goto node_10001;
+ } else {
+ goto node_10000;
+ }
+
+node_10000:
+ if (bits & 0x4000000) {
+ *symbol = 65;
+ return 6;
+ } else {
+ *symbol = 61;
+ return 6;
+ }
+
+node_10001:
+ if (bits & 0x4000000) {
+ *symbol = 98;
+ return 6;
+ } else {
+ *symbol = 95;
+ return 6;
+ }
+
+node_1001:
+ if (bits & 0x8000000) {
+ goto node_10011;
+ } else {
+ goto node_10010;
+ }
+
+node_10010:
+ if (bits & 0x4000000) {
+ *symbol = 102;
+ return 6;
+ } else {
+ *symbol = 100;
+ return 6;
+ }
+
+node_10011:
+ if (bits & 0x4000000) {
+ *symbol = 104;
+ return 6;
+ } else {
+ *symbol = 103;
+ return 6;
+ }
+
+node_101:
+ if (bits & 0x10000000) {
+ goto node_1011;
+ } else {
+ goto node_1010;
+ }
+
+node_1010:
+ if (bits & 0x8000000) {
+ goto node_10101;
+ } else {
+ goto node_10100;
+ }
+
+node_10100:
+ if (bits & 0x4000000) {
+ *symbol = 109;
+ return 6;
+ } else {
+ *symbol = 108;
+ return 6;
+ }
+
+node_10101:
+ if (bits & 0x4000000) {
+ *symbol = 112;
+ return 6;
+ } else {
+ *symbol = 110;
+ return 6;
+ }
+
+node_1011:
+ if (bits & 0x8000000) {
+ goto node_10111;
+ } else {
+ goto node_10110;
+ }
+
+node_10110:
+ if (bits & 0x4000000) {
+ *symbol = 117;
+ return 6;
+ } else {
+ *symbol = 114;
+ return 6;
+ }
+
+node_10111:
+ if (bits & 0x4000000) {
+ goto node_101111;
+ } else {
+ goto node_101110;
+ }
+
+node_101110:
+ if (bits & 0x2000000) {
+ *symbol = 66;
+ return 7;
+ } else {
+ *symbol = 58;
+ return 7;
+ }
+
+node_101111:
+ if (bits & 0x2000000) {
+ *symbol = 68;
+ return 7;
+ } else {
+ *symbol = 67;
+ return 7;
+ }
+
+node_11:
+ if (bits & 0x20000000) {
+ goto node_111;
+ } else {
+ goto node_110;
+ }
+
+node_110:
+ if (bits & 0x10000000) {
+ goto node_1101;
+ } else {
+ goto node_1100;
+ }
+
+node_1100:
+ if (bits & 0x8000000) {
+ goto node_11001;
+ } else {
+ goto node_11000;
+ }
+
+node_11000:
+ if (bits & 0x4000000) {
+ goto node_110001;
+ } else {
+ goto node_110000;
+ }
+
+node_110000:
+ if (bits & 0x2000000) {
+ *symbol = 70;
+ return 7;
+ } else {
+ *symbol = 69;
+ return 7;
+ }
+
+node_110001:
+ if (bits & 0x2000000) {
+ *symbol = 72;
+ return 7;
+ } else {
+ *symbol = 71;
+ return 7;
+ }
+
+node_11001:
+ if (bits & 0x4000000) {
+ goto node_110011;
+ } else {
+ goto node_110010;
+ }
+
+node_110010:
+ if (bits & 0x2000000) {
+ *symbol = 74;
+ return 7;
+ } else {
+ *symbol = 73;
+ return 7;
+ }
+
+node_110011:
+ if (bits & 0x2000000) {
+ *symbol = 76;
+ return 7;
+ } else {
+ *symbol = 75;
+ return 7;
+ }
+
+node_1101:
+ if (bits & 0x8000000) {
+ goto node_11011;
+ } else {
+ goto node_11010;
+ }
+
+node_11010:
+ if (bits & 0x4000000) {
+ goto node_110101;
+ } else {
+ goto node_110100;
+ }
+
+node_110100:
+ if (bits & 0x2000000) {
+ *symbol = 78;
+ return 7;
+ } else {
+ *symbol = 77;
+ return 7;
+ }
+
+node_110101:
+ if (bits & 0x2000000) {
+ *symbol = 80;
+ return 7;
+ } else {
+ *symbol = 79;
+ return 7;
+ }
+
+node_11011:
+ if (bits & 0x4000000) {
+ goto node_110111;
+ } else {
+ goto node_110110;
+ }
+
+node_110110:
+ if (bits & 0x2000000) {
+ *symbol = 82;
+ return 7;
+ } else {
+ *symbol = 81;
+ return 7;
+ }
+
+node_110111:
+ if (bits & 0x2000000) {
+ *symbol = 84;
+ return 7;
+ } else {
+ *symbol = 83;
+ return 7;
+ }
+
+node_111:
+ if (bits & 0x10000000) {
+ goto node_1111;
+ } else {
+ goto node_1110;
+ }
+
+node_1110:
+ if (bits & 0x8000000) {
+ goto node_11101;
+ } else {
+ goto node_11100;
+ }
+
+node_11100:
+ if (bits & 0x4000000) {
+ goto node_111001;
+ } else {
+ goto node_111000;
+ }
+
+node_111000:
+ if (bits & 0x2000000) {
+ *symbol = 86;
+ return 7;
+ } else {
+ *symbol = 85;
+ return 7;
+ }
+
+node_111001:
+ if (bits & 0x2000000) {
+ *symbol = 89;
+ return 7;
+ } else {
+ *symbol = 87;
+ return 7;
+ }
+
+node_11101:
+ if (bits & 0x4000000) {
+ goto node_111011;
+ } else {
+ goto node_111010;
+ }
+
+node_111010:
+ if (bits & 0x2000000) {
+ *symbol = 107;
+ return 7;
+ } else {
+ *symbol = 106;
+ return 7;
+ }
+
+node_111011:
+ if (bits & 0x2000000) {
+ *symbol = 118;
+ return 7;
+ } else {
+ *symbol = 113;
+ return 7;
+ }
+
+node_1111:
+ if (bits & 0x8000000) {
+ goto node_11111;
+ } else {
+ goto node_11110;
+ }
+
+node_11110:
+ if (bits & 0x4000000) {
+ goto node_111101;
+ } else {
+ goto node_111100;
+ }
+
+node_111100:
+ if (bits & 0x2000000) {
+ *symbol = 120;
+ return 7;
+ } else {
+ *symbol = 119;
+ return 7;
+ }
+
+node_111101:
+ if (bits & 0x2000000) {
+ *symbol = 122;
+ return 7;
+ } else {
+ *symbol = 121;
+ return 7;
+ }
+
+node_11111:
+ if (bits & 0x4000000) {
+ goto node_111111;
+ } else {
+ goto node_111110;
+ }
+
+node_111110:
+ if (bits & 0x2000000) {
+ goto node_1111101;
+ } else {
+ goto node_1111100;
+ }
+
+node_1111100:
+ if (bits & 0x1000000) {
+ *symbol = 42;
+ return 8;
+ } else {
+ *symbol = 38;
+ return 8;
+ }
+
+node_1111101:
+ if (bits & 0x1000000) {
+ *symbol = 59;
+ return 8;
+ } else {
+ *symbol = 44;
+ return 8;
+ }
+
+node_111111:
+ if (bits & 0x2000000) {
+ goto node_1111111;
+ } else {
+ goto node_1111110;
+ }
+
+node_1111110:
+ if (bits & 0x1000000) {
+ *symbol = 90;
+ return 8;
+ } else {
+ *symbol = 88;
+ return 8;
+ }
+
+node_1111111:
+ if (bits & 0x1000000) {
+ goto node_11111111;
+ } else {
+ goto node_11111110;
+ }
+
+node_11111110:
+ if (bits & 0x800000) {
+ goto node_111111101;
+ } else {
+ goto node_111111100;
+ }
+
+node_111111100:
+ if (bits & 0x400000) {
+ *symbol = 34;
+ return 10;
+ } else {
+ *symbol = 33;
+ return 10;
+ }
+
+node_111111101:
+ if (bits & 0x400000) {
+ *symbol = 41;
+ return 10;
+ } else {
+ *symbol = 40;
+ return 10;
+ }
+
+node_11111111:
+ if (bits & 0x800000) {
+ goto node_111111111;
+ } else {
+ goto node_111111110;
+ }
+
+node_111111110:
+ if (bits & 0x400000) {
+ goto node_1111111101;
+ } else {
+ *symbol = 63;
+ return 10;
+ }
+
+node_1111111101:
+ if (bits & 0x200000) {
+ *symbol = 43;
+ return 11;
+ } else {
+ *symbol = 39;
+ return 11;
+ }
+
+node_111111111:
+ if (bits & 0x400000) {
+ goto node_1111111111;
+ } else {
+ goto node_1111111110;
+ }
+
+node_1111111110:
+ if (bits & 0x200000) {
+ goto node_11111111101;
+ } else {
+ *symbol = 124;
+ return 11;
+ }
+
+node_11111111101:
+ if (bits & 0x100000) {
+ *symbol = 62;
+ return 12;
+ } else {
+ *symbol = 35;
+ return 12;
+ }
+
+node_1111111111:
+ if (bits & 0x200000) {
+ goto node_11111111111;
+ } else {
+ goto node_11111111110;
+ }
+
+node_11111111110:
+ if (bits & 0x100000) {
+ goto node_111111111101;
+ } else {
+ goto node_111111111100;
+ }
+
+node_111111111100:
+ if (bits & 0x80000) {
+ *symbol = 36;
+ return 13;
+ } else {
+ *symbol = 0;
+ return 13;
+ }
+
+node_111111111101:
+ if (bits & 0x80000) {
+ *symbol = 91;
+ return 13;
+ } else {
+ *symbol = 64;
+ return 13;
+ }
+
+node_11111111111:
+ if (bits & 0x100000) {
+ goto node_111111111111;
+ } else {
+ goto node_111111111110;
+ }
+
+node_111111111110:
+ if (bits & 0x80000) {
+ *symbol = 126;
+ return 13;
+ } else {
+ *symbol = 93;
+ return 13;
+ }
+
+node_111111111111:
+ if (bits & 0x80000) {
+ goto node_1111111111111;
+ } else {
+ goto node_1111111111110;
+ }
+
+node_1111111111110:
+ if (bits & 0x40000) {
+ *symbol = 125;
+ return 14;
+ } else {
+ *symbol = 94;
+ return 14;
+ }
+
+node_1111111111111:
+ if (bits & 0x40000) {
+ goto node_11111111111111;
+ } else {
+ goto node_11111111111110;
+ }
+
+node_11111111111110:
+ if (bits & 0x20000) {
+ *symbol = 96;
+ return 15;
+ } else {
+ *symbol = 60;
+ return 15;
+ }
+
+node_11111111111111:
+ if (bits & 0x20000) {
+ goto node_111111111111111;
+ } else {
+ *symbol = 123;
+ return 15;
+ }
+
+node_111111111111111:
+ if (bits & 0x10000) {
+ goto node_1111111111111111;
+ } else {
+ goto node_1111111111111110;
+ }
+
+node_1111111111111110:
+ if (bits & 0x8000) {
+ goto node_11111111111111101;
+ } else {
+ goto node_11111111111111100;
+ }
+
+node_11111111111111100:
+ if (bits & 0x4000) {
+ goto node_111111111111111001;
+ } else {
+ goto node_111111111111111000;
+ }
+
+node_111111111111111000:
+ if (bits & 0x2000) {
+ *symbol = 195;
+ return 19;
+ } else {
+ *symbol = 92;
+ return 19;
+ }
+
+node_111111111111111001:
+ if (bits & 0x2000) {
+ goto node_1111111111111110011;
+ } else {
+ *symbol = 208;
+ return 19;
+ }
+
+node_1111111111111110011:
+ if (bits & 0x1000) {
+ *symbol = 130;
+ return 20;
+ } else {
+ *symbol = 128;
+ return 20;
+ }
+
+node_11111111111111101:
+ if (bits & 0x4000) {
+ goto node_111111111111111011;
+ } else {
+ goto node_111111111111111010;
+ }
+
+node_111111111111111010:
+ if (bits & 0x2000) {
+ goto node_1111111111111110101;
+ } else {
+ goto node_1111111111111110100;
+ }
+
+node_1111111111111110100:
+ if (bits & 0x1000) {
+ *symbol = 162;
+ return 20;
+ } else {
+ *symbol = 131;
+ return 20;
+ }
+
+node_1111111111111110101:
+ if (bits & 0x1000) {
+ *symbol = 194;
+ return 20;
+ } else {
+ *symbol = 184;
+ return 20;
+ }
+
+node_111111111111111011:
+ if (bits & 0x2000) {
+ goto node_1111111111111110111;
+ } else {
+ goto node_1111111111111110110;
+ }
+
+node_1111111111111110110:
+ if (bits & 0x1000) {
+ *symbol = 226;
+ return 20;
+ } else {
+ *symbol = 224;
+ return 20;
+ }
+
+node_1111111111111110111:
+ if (bits & 0x1000) {
+ goto node_11111111111111101111;
+ } else {
+ goto node_11111111111111101110;
+ }
+
+node_11111111111111101110:
+ if (bits & 0x800) {
+ *symbol = 161;
+ return 21;
+ } else {
+ *symbol = 153;
+ return 21;
+ }
+
+node_11111111111111101111:
+ if (bits & 0x800) {
+ *symbol = 172;
+ return 21;
+ } else {
+ *symbol = 167;
+ return 21;
+ }
+
+node_1111111111111111:
+ if (bits & 0x8000) {
+ goto node_11111111111111111;
+ } else {
+ goto node_11111111111111110;
+ }
+
+node_11111111111111110:
+ if (bits & 0x4000) {
+ goto node_111111111111111101;
+ } else {
+ goto node_111111111111111100;
+ }
+
+node_111111111111111100:
+ if (bits & 0x2000) {
+ goto node_1111111111111111001;
+ } else {
+ goto node_1111111111111111000;
+ }
+
+node_1111111111111111000:
+ if (bits & 0x1000) {
+ goto node_11111111111111110001;
+ } else {
+ goto node_11111111111111110000;
+ }
+
+node_11111111111111110000:
+ if (bits & 0x800) {
+ *symbol = 177;
+ return 21;
+ } else {
+ *symbol = 176;
+ return 21;
+ }
+
+node_11111111111111110001:
+ if (bits & 0x800) {
+ *symbol = 209;
+ return 21;
+ } else {
+ *symbol = 179;
+ return 21;
+ }
+
+node_1111111111111111001:
+ if (bits & 0x1000) {
+ goto node_11111111111111110011;
+ } else {
+ goto node_11111111111111110010;
+ }
+
+node_11111111111111110010:
+ if (bits & 0x800) {
+ *symbol = 217;
+ return 21;
+ } else {
+ *symbol = 216;
+ return 21;
+ }
+
+node_11111111111111110011:
+ if (bits & 0x800) {
+ *symbol = 229;
+ return 21;
+ } else {
+ *symbol = 227;
+ return 21;
+ }
+
+node_111111111111111101:
+ if (bits & 0x2000) {
+ goto node_1111111111111111011;
+ } else {
+ goto node_1111111111111111010;
+ }
+
+node_1111111111111111010:
+ if (bits & 0x1000) {
+ goto node_11111111111111110101;
+ } else {
+ goto node_11111111111111110100;
+ }
+
+node_11111111111111110100:
+ if (bits & 0x800) {
+ goto node_111111111111111101001;
+ } else {
+ *symbol = 230;
+ return 21;
+ }
+
+node_111111111111111101001:
+ if (bits & 0x400) {
+ *symbol = 132;
+ return 22;
+ } else {
+ *symbol = 129;
+ return 22;
+ }
+
+node_11111111111111110101:
+ if (bits & 0x800) {
+ goto node_111111111111111101011;
+ } else {
+ goto node_111111111111111101010;
+ }
+
+node_111111111111111101010:
+ if (bits & 0x400) {
+ *symbol = 134;
+ return 22;
+ } else {
+ *symbol = 133;
+ return 22;
+ }
+
+node_111111111111111101011:
+ if (bits & 0x400) {
+ *symbol = 146;
+ return 22;
+ } else {
+ *symbol = 136;
+ return 22;
+ }
+
+node_1111111111111111011:
+ if (bits & 0x1000) {
+ goto node_11111111111111110111;
+ } else {
+ goto node_11111111111111110110;
+ }
+
+node_11111111111111110110:
+ if (bits & 0x800) {
+ goto node_111111111111111101101;
+ } else {
+ goto node_111111111111111101100;
+ }
+
+node_111111111111111101100:
+ if (bits & 0x400) {
+ *symbol = 156;
+ return 22;
+ } else {
+ *symbol = 154;
+ return 22;
+ }
+
+node_111111111111111101101:
+ if (bits & 0x400) {
+ *symbol = 163;
+ return 22;
+ } else {
+ *symbol = 160;
+ return 22;
+ }
+
+node_11111111111111110111:
+ if (bits & 0x800) {
+ goto node_111111111111111101111;
+ } else {
+ goto node_111111111111111101110;
+ }
+
+node_111111111111111101110:
+ if (bits & 0x400) {
+ *symbol = 169;
+ return 22;
+ } else {
+ *symbol = 164;
+ return 22;
+ }
+
+node_111111111111111101111:
+ if (bits & 0x400) {
+ *symbol = 173;
+ return 22;
+ } else {
+ *symbol = 170;
+ return 22;
+ }
+
+node_11111111111111111:
+ if (bits & 0x4000) {
+ goto node_111111111111111111;
+ } else {
+ goto node_111111111111111110;
+ }
+
+node_111111111111111110:
+ if (bits & 0x2000) {
+ goto node_1111111111111111101;
+ } else {
+ goto node_1111111111111111100;
+ }
+
+node_1111111111111111100:
+ if (bits & 0x1000) {
+ goto node_11111111111111111001;
+ } else {
+ goto node_11111111111111111000;
+ }
+
+node_11111111111111111000:
+ if (bits & 0x800) {
+ goto node_111111111111111110001;
+ } else {
+ goto node_111111111111111110000;
+ }
+
+node_111111111111111110000:
+ if (bits & 0x400) {
+ *symbol = 181;
+ return 22;
+ } else {
+ *symbol = 178;
+ return 22;
+ }
+
+node_111111111111111110001:
+ if (bits & 0x400) {
+ *symbol = 186;
+ return 22;
+ } else {
+ *symbol = 185;
+ return 22;
+ }
+
+node_11111111111111111001:
+ if (bits & 0x800) {
+ goto node_111111111111111110011;
+ } else {
+ goto node_111111111111111110010;
+ }
+
+node_111111111111111110010:
+ if (bits & 0x400) {
+ *symbol = 189;
+ return 22;
+ } else {
+ *symbol = 187;
+ return 22;
+ }
+
+node_111111111111111110011:
+ if (bits & 0x400) {
+ *symbol = 196;
+ return 22;
+ } else {
+ *symbol = 190;
+ return 22;
+ }
+
+node_1111111111111111101:
+ if (bits & 0x1000) {
+ goto node_11111111111111111011;
+ } else {
+ goto node_11111111111111111010;
+ }
+
+node_11111111111111111010:
+ if (bits & 0x800) {
+ goto node_111111111111111110101;
+ } else {
+ goto node_111111111111111110100;
+ }
+
+node_111111111111111110100:
+ if (bits & 0x400) {
+ *symbol = 228;
+ return 22;
+ } else {
+ *symbol = 198;
+ return 22;
+ }
+
+node_111111111111111110101:
+ if (bits & 0x400) {
+ *symbol = 233;
+ return 22;
+ } else {
+ *symbol = 232;
+ return 22;
+ }
+
+node_11111111111111111011:
+ if (bits & 0x800) {
+ goto node_111111111111111110111;
+ } else {
+ goto node_111111111111111110110;
+ }
+
+node_111111111111111110110:
+ if (bits & 0x400) {
+ goto node_1111111111111111101101;
+ } else {
+ goto node_1111111111111111101100;
+ }
+
+node_1111111111111111101100:
+ if (bits & 0x200) {
+ *symbol = 135;
+ return 23;
+ } else {
+ *symbol = 1;
+ return 23;
+ }
+
+node_1111111111111111101101:
+ if (bits & 0x200) {
+ *symbol = 138;
+ return 23;
+ } else {
+ *symbol = 137;
+ return 23;
+ }
+
+node_111111111111111110111:
+ if (bits & 0x400) {
+ goto node_1111111111111111101111;
+ } else {
+ goto node_1111111111111111101110;
+ }
+
+node_1111111111111111101110:
+ if (bits & 0x200) {
+ *symbol = 140;
+ return 23;
+ } else {
+ *symbol = 139;
+ return 23;
+ }
+
+node_1111111111111111101111:
+ if (bits & 0x200) {
+ *symbol = 143;
+ return 23;
+ } else {
+ *symbol = 141;
+ return 23;
+ }
+
+node_111111111111111111:
+ if (bits & 0x2000) {
+ goto node_1111111111111111111;
+ } else {
+ goto node_1111111111111111110;
+ }
+
+node_1111111111111111110:
+ if (bits & 0x1000) {
+ goto node_11111111111111111101;
+ } else {
+ goto node_11111111111111111100;
+ }
+
+node_11111111111111111100:
+ if (bits & 0x800) {
+ goto node_111111111111111111001;
+ } else {
+ goto node_111111111111111111000;
+ }
+
+node_111111111111111111000:
+ if (bits & 0x400) {
+ goto node_1111111111111111110001;
+ } else {
+ goto node_1111111111111111110000;
+ }
+
+node_1111111111111111110000:
+ if (bits & 0x200) {
+ *symbol = 149;
+ return 23;
+ } else {
+ *symbol = 147;
+ return 23;
+ }
+
+node_1111111111111111110001:
+ if (bits & 0x200) {
+ *symbol = 151;
+ return 23;
+ } else {
+ *symbol = 150;
+ return 23;
+ }
+
+node_111111111111111111001:
+ if (bits & 0x400) {
+ goto node_1111111111111111110011;
+ } else {
+ goto node_1111111111111111110010;
+ }
+
+node_1111111111111111110010:
+ if (bits & 0x200) {
+ *symbol = 155;
+ return 23;
+ } else {
+ *symbol = 152;
+ return 23;
+ }
+
+node_1111111111111111110011:
+ if (bits & 0x200) {
+ *symbol = 158;
+ return 23;
+ } else {
+ *symbol = 157;
+ return 23;
+ }
+
+node_11111111111111111101:
+ if (bits & 0x800) {
+ goto node_111111111111111111011;
+ } else {
+ goto node_111111111111111111010;
+ }
+
+node_111111111111111111010:
+ if (bits & 0x400) {
+ goto node_1111111111111111110101;
+ } else {
+ goto node_1111111111111111110100;
+ }
+
+node_1111111111111111110100:
+ if (bits & 0x200) {
+ *symbol = 166;
+ return 23;
+ } else {
+ *symbol = 165;
+ return 23;
+ }
+
+node_1111111111111111110101:
+ if (bits & 0x200) {
+ *symbol = 174;
+ return 23;
+ } else {
+ *symbol = 168;
+ return 23;
+ }
+
+node_111111111111111111011:
+ if (bits & 0x400) {
+ goto node_1111111111111111110111;
+ } else {
+ goto node_1111111111111111110110;
+ }
+
+node_1111111111111111110110:
+ if (bits & 0x200) {
+ *symbol = 180;
+ return 23;
+ } else {
+ *symbol = 175;
+ return 23;
+ }
+
+node_1111111111111111110111:
+ if (bits & 0x200) {
+ *symbol = 183;
+ return 23;
+ } else {
+ *symbol = 182;
+ return 23;
+ }
+
+node_1111111111111111111:
+ if (bits & 0x1000) {
+ goto node_11111111111111111111;
+ } else {
+ goto node_11111111111111111110;
+ }
+
+node_11111111111111111110:
+ if (bits & 0x800) {
+ goto node_111111111111111111101;
+ } else {
+ goto node_111111111111111111100;
+ }
+
+node_111111111111111111100:
+ if (bits & 0x400) {
+ goto node_1111111111111111111001;
+ } else {
+ goto node_1111111111111111111000;
+ }
+
+node_1111111111111111111000:
+ if (bits & 0x200) {
+ *symbol = 191;
+ return 23;
+ } else {
+ *symbol = 188;
+ return 23;
+ }
+
+node_1111111111111111111001:
+ if (bits & 0x200) {
+ *symbol = 231;
+ return 23;
+ } else {
+ *symbol = 197;
+ return 23;
+ }
+
+node_111111111111111111101:
+ if (bits & 0x400) {
+ goto node_1111111111111111111011;
+ } else {
+ goto node_1111111111111111111010;
+ }
+
+node_1111111111111111111010:
+ if (bits & 0x200) {
+ goto node_11111111111111111110101;
+ } else {
+ *symbol = 239;
+ return 23;
+ }
+
+node_11111111111111111110101:
+ if (bits & 0x100) {
+ *symbol = 142;
+ return 24;
+ } else {
+ *symbol = 9;
+ return 24;
+ }
+
+node_1111111111111111111011:
+ if (bits & 0x200) {
+ goto node_11111111111111111110111;
+ } else {
+ goto node_11111111111111111110110;
+ }
+
+node_11111111111111111110110:
+ if (bits & 0x100) {
+ *symbol = 145;
+ return 24;
+ } else {
+ *symbol = 144;
+ return 24;
+ }
+
+node_11111111111111111110111:
+ if (bits & 0x100) {
+ *symbol = 159;
+ return 24;
+ } else {
+ *symbol = 148;
+ return 24;
+ }
+
+node_11111111111111111111:
+ if (bits & 0x800) {
+ goto node_111111111111111111111;
+ } else {
+ goto node_111111111111111111110;
+ }
+
+node_111111111111111111110:
+ if (bits & 0x400) {
+ goto node_1111111111111111111101;
+ } else {
+ goto node_1111111111111111111100;
+ }
+
+node_1111111111111111111100:
+ if (bits & 0x200) {
+ goto node_11111111111111111111001;
+ } else {
+ goto node_11111111111111111111000;
+ }
+
+node_11111111111111111111000:
+ if (bits & 0x100) {
+ *symbol = 206;
+ return 24;
+ } else {
+ *symbol = 171;
+ return 24;
+ }
+
+node_11111111111111111111001:
+ if (bits & 0x100) {
+ *symbol = 225;
+ return 24;
+ } else {
+ *symbol = 215;
+ return 24;
+ }
+
+node_1111111111111111111101:
+ if (bits & 0x200) {
+ goto node_11111111111111111111011;
+ } else {
+ goto node_11111111111111111111010;
+ }
+
+node_11111111111111111111010:
+ if (bits & 0x100) {
+ *symbol = 237;
+ return 24;
+ } else {
+ *symbol = 236;
+ return 24;
+ }
+
+node_11111111111111111111011:
+ if (bits & 0x100) {
+ goto node_111111111111111111110111;
+ } else {
+ goto node_111111111111111111110110;
+ }
+
+node_111111111111111111110110:
+ if (bits & 0x80) {
+ *symbol = 207;
+ return 25;
+ } else {
+ *symbol = 199;
+ return 25;
+ }
+
+node_111111111111111111110111:
+ if (bits & 0x80) {
+ *symbol = 235;
+ return 25;
+ } else {
+ *symbol = 234;
+ return 25;
+ }
+
+node_111111111111111111111:
+ if (bits & 0x400) {
+ goto node_1111111111111111111111;
+ } else {
+ goto node_1111111111111111111110;
+ }
+
+node_1111111111111111111110:
+ if (bits & 0x200) {
+ goto node_11111111111111111111101;
+ } else {
+ goto node_11111111111111111111100;
+ }
+
+node_11111111111111111111100:
+ if (bits & 0x100) {
+ goto node_111111111111111111111001;
+ } else {
+ goto node_111111111111111111111000;
+ }
+
+node_111111111111111111111000:
+ if (bits & 0x80) {
+ goto node_1111111111111111111110001;
+ } else {
+ goto node_1111111111111111111110000;
+ }
+
+node_1111111111111111111110000:
+ if (bits & 0x40) {
+ *symbol = 193;
+ return 26;
+ } else {
+ *symbol = 192;
+ return 26;
+ }
+
+node_1111111111111111111110001:
+ if (bits & 0x40) {
+ *symbol = 201;
+ return 26;
+ } else {
+ *symbol = 200;
+ return 26;
+ }
+
+node_111111111111111111111001:
+ if (bits & 0x80) {
+ goto node_1111111111111111111110011;
+ } else {
+ goto node_1111111111111111111110010;
+ }
+
+node_1111111111111111111110010:
+ if (bits & 0x40) {
+ *symbol = 205;
+ return 26;
+ } else {
+ *symbol = 202;
+ return 26;
+ }
+
+node_1111111111111111111110011:
+ if (bits & 0x40) {
+ *symbol = 213;
+ return 26;
+ } else {
+ *symbol = 210;
+ return 26;
+ }
+
+node_11111111111111111111101:
+ if (bits & 0x100) {
+ goto node_111111111111111111111011;
+ } else {
+ goto node_111111111111111111111010;
+ }
+
+node_111111111111111111111010:
+ if (bits & 0x80) {
+ goto node_1111111111111111111110101;
+ } else {
+ goto node_1111111111111111111110100;
+ }
+
+node_1111111111111111111110100:
+ if (bits & 0x40) {
+ *symbol = 219;
+ return 26;
+ } else {
+ *symbol = 218;
+ return 26;
+ }
+
+node_1111111111111111111110101:
+ if (bits & 0x40) {
+ *symbol = 240;
+ return 26;
+ } else {
+ *symbol = 238;
+ return 26;
+ }
+
+node_111111111111111111111011:
+ if (bits & 0x80) {
+ goto node_1111111111111111111110111;
+ } else {
+ goto node_1111111111111111111110110;
+ }
+
+node_1111111111111111111110110:
+ if (bits & 0x40) {
+ *symbol = 243;
+ return 26;
+ } else {
+ *symbol = 242;
+ return 26;
+ }
+
+node_1111111111111111111110111:
+ if (bits & 0x40) {
+ goto node_11111111111111111111101111;
+ } else {
+ *symbol = 255;
+ return 26;
+ }
+
+node_11111111111111111111101111:
+ if (bits & 0x20) {
+ *symbol = 204;
+ return 27;
+ } else {
+ *symbol = 203;
+ return 27;
+ }
+
+node_1111111111111111111111:
+ if (bits & 0x200) {
+ goto node_11111111111111111111111;
+ } else {
+ goto node_11111111111111111111110;
+ }
+
+node_11111111111111111111110:
+ if (bits & 0x100) {
+ goto node_111111111111111111111101;
+ } else {
+ goto node_111111111111111111111100;
+ }
+
+node_111111111111111111111100:
+ if (bits & 0x80) {
+ goto node_1111111111111111111111001;
+ } else {
+ goto node_1111111111111111111111000;
+ }
+
+node_1111111111111111111111000:
+ if (bits & 0x40) {
+ goto node_11111111111111111111110001;
+ } else {
+ goto node_11111111111111111111110000;
+ }
+
+node_11111111111111111111110000:
+ if (bits & 0x20) {
+ *symbol = 212;
+ return 27;
+ } else {
+ *symbol = 211;
+ return 27;
+ }
+
+node_11111111111111111111110001:
+ if (bits & 0x20) {
+ *symbol = 221;
+ return 27;
+ } else {
+ *symbol = 214;
+ return 27;
+ }
+
+node_1111111111111111111111001:
+ if (bits & 0x40) {
+ goto node_11111111111111111111110011;
+ } else {
+ goto node_11111111111111111111110010;
+ }
+
+node_11111111111111111111110010:
+ if (bits & 0x20) {
+ *symbol = 223;
+ return 27;
+ } else {
+ *symbol = 222;
+ return 27;
+ }
+
+node_11111111111111111111110011:
+ if (bits & 0x20) {
+ *symbol = 244;
+ return 27;
+ } else {
+ *symbol = 241;
+ return 27;
+ }
+
+node_111111111111111111111101:
+ if (bits & 0x80) {
+ goto node_1111111111111111111111011;
+ } else {
+ goto node_1111111111111111111111010;
+ }
+
+node_1111111111111111111111010:
+ if (bits & 0x40) {
+ goto node_11111111111111111111110101;
+ } else {
+ goto node_11111111111111111111110100;
+ }
+
+node_11111111111111111111110100:
+ if (bits & 0x20) {
+ *symbol = 246;
+ return 27;
+ } else {
+ *symbol = 245;
+ return 27;
+ }
+
+node_11111111111111111111110101:
+ if (bits & 0x20) {
+ *symbol = 248;
+ return 27;
+ } else {
+ *symbol = 247;
+ return 27;
+ }
+
+node_1111111111111111111111011:
+ if (bits & 0x40) {
+ goto node_11111111111111111111110111;
+ } else {
+ goto node_11111111111111111111110110;
+ }
+
+node_11111111111111111111110110:
+ if (bits & 0x20) {
+ *symbol = 251;
+ return 27;
+ } else {
+ *symbol = 250;
+ return 27;
+ }
+
+node_11111111111111111111110111:
+ if (bits & 0x20) {
+ *symbol = 253;
+ return 27;
+ } else {
+ *symbol = 252;
+ return 27;
+ }
+
+node_11111111111111111111111:
+ if (bits & 0x100) {
+ goto node_111111111111111111111111;
+ } else {
+ goto node_111111111111111111111110;
+ }
+
+node_111111111111111111111110:
+ if (bits & 0x80) {
+ goto node_1111111111111111111111101;
+ } else {
+ goto node_1111111111111111111111100;
+ }
+
+node_1111111111111111111111100:
+ if (bits & 0x40) {
+ goto node_11111111111111111111111001;
+ } else {
+ goto node_11111111111111111111111000;
+ }
+
+node_11111111111111111111111000:
+ if (bits & 0x20) {
+ goto node_111111111111111111111110001;
+ } else {
+ *symbol = 254;
+ return 27;
+ }
+
+node_111111111111111111111110001:
+ if (bits & 0x10) {
+ *symbol = 3;
+ return 28;
+ } else {
+ *symbol = 2;
+ return 28;
+ }
+
+node_11111111111111111111111001:
+ if (bits & 0x20) {
+ goto node_111111111111111111111110011;
+ } else {
+ goto node_111111111111111111111110010;
+ }
+
+node_111111111111111111111110010:
+ if (bits & 0x10) {
+ *symbol = 5;
+ return 28;
+ } else {
+ *symbol = 4;
+ return 28;
+ }
+
+node_111111111111111111111110011:
+ if (bits & 0x10) {
+ *symbol = 7;
+ return 28;
+ } else {
+ *symbol = 6;
+ return 28;
+ }
+
+node_1111111111111111111111101:
+ if (bits & 0x40) {
+ goto node_11111111111111111111111011;
+ } else {
+ goto node_11111111111111111111111010;
+ }
+
+node_11111111111111111111111010:
+ if (bits & 0x20) {
+ goto node_111111111111111111111110101;
+ } else {
+ goto node_111111111111111111111110100;
+ }
+
+node_111111111111111111111110100:
+ if (bits & 0x10) {
+ *symbol = 11;
+ return 28;
+ } else {
+ *symbol = 8;
+ return 28;
+ }
+
+node_111111111111111111111110101:
+ if (bits & 0x10) {
+ *symbol = 14;
+ return 28;
+ } else {
+ *symbol = 12;
+ return 28;
+ }
+
+node_11111111111111111111111011:
+ if (bits & 0x20) {
+ goto node_111111111111111111111110111;
+ } else {
+ goto node_111111111111111111111110110;
+ }
+
+node_111111111111111111111110110:
+ if (bits & 0x10) {
+ *symbol = 16;
+ return 28;
+ } else {
+ *symbol = 15;
+ return 28;
+ }
+
+node_111111111111111111111110111:
+ if (bits & 0x10) {
+ *symbol = 18;
+ return 28;
+ } else {
+ *symbol = 17;
+ return 28;
+ }
+
+node_111111111111111111111111:
+ if (bits & 0x80) {
+ goto node_1111111111111111111111111;
+ } else {
+ goto node_1111111111111111111111110;
+ }
+
+node_1111111111111111111111110:
+ if (bits & 0x40) {
+ goto node_11111111111111111111111101;
+ } else {
+ goto node_11111111111111111111111100;
+ }
+
+node_11111111111111111111111100:
+ if (bits & 0x20) {
+ goto node_111111111111111111111111001;
+ } else {
+ goto node_111111111111111111111111000;
+ }
+
+node_111111111111111111111111000:
+ if (bits & 0x10) {
+ *symbol = 20;
+ return 28;
+ } else {
+ *symbol = 19;
+ return 28;
+ }
+
+node_111111111111111111111111001:
+ if (bits & 0x10) {
+ *symbol = 23;
+ return 28;
+ } else {
+ *symbol = 21;
+ return 28;
+ }
+
+node_11111111111111111111111101:
+ if (bits & 0x20) {
+ goto node_111111111111111111111111011;
+ } else {
+ goto node_111111111111111111111111010;
+ }
+
+node_111111111111111111111111010:
+ if (bits & 0x10) {
+ *symbol = 25;
+ return 28;
+ } else {
+ *symbol = 24;
+ return 28;
+ }
+
+node_111111111111111111111111011:
+ if (bits & 0x10) {
+ *symbol = 27;
+ return 28;
+ } else {
+ *symbol = 26;
+ return 28;
+ }
+
+node_1111111111111111111111111:
+ if (bits & 0x40) {
+ goto node_11111111111111111111111111;
+ } else {
+ goto node_11111111111111111111111110;
+ }
+
+node_11111111111111111111111110:
+ if (bits & 0x20) {
+ goto node_111111111111111111111111101;
+ } else {
+ goto node_111111111111111111111111100;
+ }
+
+node_111111111111111111111111100:
+ if (bits & 0x10) {
+ *symbol = 29;
+ return 28;
+ } else {
+ *symbol = 28;
+ return 28;
+ }
+
+node_111111111111111111111111101:
+ if (bits & 0x10) {
+ *symbol = 31;
+ return 28;
+ } else {
+ *symbol = 30;
+ return 28;
+ }
+
+node_11111111111111111111111111:
+ if (bits & 0x20) {
+ goto node_111111111111111111111111111;
+ } else {
+ goto node_111111111111111111111111110;
+ }
+
+node_111111111111111111111111110:
+ if (bits & 0x10) {
+ *symbol = 220;
+ return 28;
+ } else {
+ *symbol = 127;
+ return 28;
+ }
+
+node_111111111111111111111111111:
+ if (bits & 0x10) {
+ goto node_1111111111111111111111111111;
+ } else {
+ *symbol = 249;
+ return 28;
+ }
+
+node_1111111111111111111111111111:
+ if (bits & 0x8) {
+ goto node_11111111111111111111111111111;
+ } else {
+ goto node_11111111111111111111111111110;
+ }
+
+node_11111111111111111111111111110:
+ if (bits & 0x4) {
+ *symbol = 13;
+ return 30;
+ } else {
+ *symbol = 10;
+ return 30;
+ }
+
+node_11111111111111111111111111111:
+ if (bits & 0x4) {
+ return 0; /* invalid node */
+ } else {
+ *symbol = 22;
+ return 30;
+ }
+
+}
+
+struct aws_huffman_symbol_coder *hpack_get_coder(void) {
+
+ static struct aws_huffman_symbol_coder coder = {
+ .encode = encode_symbol,
+ .decode = decode_symbol,
+ .userdata = NULL,
+ };
+ return &coder;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/http.c b/contrib/restricted/aws/aws-c-http/source/http.c
new file mode 100644
index 00000000000..8a8fe92bd19
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/http.c
@@ -0,0 +1,565 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/hash_table.h>
+#include <aws/compression/compression.h>
+#include <aws/http/private/hpack.h>
+#include <aws/http/private/http_impl.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+
+#include <ctype.h>
+
/* Place each aws-c-http error at its slot relative to the package's error-code
 * base (0x0800), so the array below can be indexed directly by error code. */
#define AWS_DEFINE_ERROR_INFO_HTTP(CODE, STR) [(CODE)-0x0800] = AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-http")

/* clang-format off */
/* Human-readable description for every AWS_ERROR_HTTP_* code. */
static struct aws_error_info s_errors[] = {
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_UNKNOWN,
        "Encountered an unknown error."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_HEADER_NOT_FOUND,
        "The specified header was not found"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_INVALID_HEADER_FIELD,
        "Invalid header field, including a forbidden header field."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_INVALID_HEADER_NAME,
        "Invalid header name."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_INVALID_HEADER_VALUE,
        "Invalid header value."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_INVALID_METHOD,
        "Method is invalid."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_INVALID_PATH,
        "Path is invalid."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_INVALID_STATUS_CODE,
        "Status code is invalid."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_MISSING_BODY_STREAM,
        "Given the provided headers (ex: Content-Length), a body is expected."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_INVALID_BODY_STREAM,
        "A body stream provided, but the message does not allow body (ex: response for HEAD Request and 304 response)"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_CONNECTION_CLOSED,
        "The connection has closed or is closing."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_SWITCHED_PROTOCOLS,
        "The connection has switched protocols."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL,
        "An unsupported protocol was encountered."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_REACTION_REQUIRED,
        "A necessary function was not invoked from a user callback."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_DATA_NOT_AVAILABLE,
        "This data is not yet available."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT,
        "Amount of data streamed out does not match the previously declared length."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_CALLBACK_FAILURE,
        "A callback has reported failure."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE,
        "Failed to upgrade HTTP connection to Websocket."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_WEBSOCKET_CLOSE_FRAME_SENT,
        "Websocket has sent CLOSE frame, no more data will be sent."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER,
        "Operation cannot be performed because websocket has been converted to a midchannel handler."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_CONNECTION_MANAGER_INVALID_STATE_FOR_ACQUIRE,
        "Acquire called after the connection manager's ref count has reached zero"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_CONNECTION_MANAGER_VENDED_CONNECTION_UNDERFLOW,
        "Release called when the connection manager's vended connection count was zero"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_SERVER_CLOSED,
        "The http server is closed, no more connections will be accepted"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_PROXY_CONNECT_FAILED,
        "Proxy-based connection establishment failed because the CONNECT call failed"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_CONNECTION_MANAGER_SHUTTING_DOWN,
        "Connection acquisition failed because connection manager is shutting down"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_CHANNEL_THROUGHPUT_FAILURE,
        "Http connection channel shut down due to failure to meet throughput minimum"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_PROTOCOL_ERROR,
        "Protocol rules violated by peer"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_STREAM_IDS_EXHAUSTED,
        "Connection exhausted all possible HTTP-stream IDs. Establish a new connection for new streams."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_GOAWAY_RECEIVED,
        "Peer sent GOAWAY to initiate connection shutdown. Establish a new connection to retry the HTTP-streams."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_RST_STREAM_RECEIVED,
        "Peer sent RST_STREAM to terminate HTTP-stream."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_RST_STREAM_SENT,
        "RST_STREAM has sent from local implementation and HTTP-stream has been terminated."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED,
        "HTTP-stream must be activated before use."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_STREAM_HAS_COMPLETED,
        "HTTP-stream has completed, action cannot be performed."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_PROXY_STRATEGY_NTLM_CHALLENGE_TOKEN_MISSING,
        "NTLM Proxy strategy was initiated without a challenge token"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_PROXY_STRATEGY_TOKEN_RETRIEVAL_FAILURE,
        "Failure in user code while retrieving proxy auth token"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_PROXY_CONNECT_FAILED_RETRYABLE,
        "Proxy connection attempt failed but the negotiation could be continued on a new connection"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_PROTOCOL_SWITCH_FAILURE,
        "Internal state failure prevent connection from switching protocols"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_MAX_CONCURRENT_STREAMS_EXCEEDED,
        "Max concurrent stream reached"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_STREAM_MANAGER_SHUTTING_DOWN,
        "Stream acquisition failed because stream manager is shutting down"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_STREAM_MANAGER_CONNECTION_ACQUIRE_FAILURE,
        "Stream acquisition failed because stream manager failed to acquire a connection"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_STREAM_MANAGER_UNEXPECTED_HTTP_VERSION,
        "Stream acquisition failed because stream manager got an unexpected version of HTTP connection"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR,
        "Websocket protocol rules violated by peer"),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_MANUAL_WRITE_NOT_ENABLED,
        "Manual write failed because manual writes are not enabled."),
    AWS_DEFINE_ERROR_INFO_HTTP(
        AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED,
        "Manual write failed because manual writes are already completed."),
};
/* clang-format on */

/* List wrapper pairing the error table with its element count. */
static struct aws_error_info_list s_error_list = {
    .error_list = s_errors,
    .count = AWS_ARRAY_SIZE(s_errors),
};
+
/* Name and description for each AWS_LS_HTTP_* log subject. */
static struct aws_log_subject_info s_log_subject_infos[] = {
    DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_GENERAL, "http", "Misc HTTP logging"),
    DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_CONNECTION, "http-connection", "HTTP client or server connection"),
    DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_ENCODER, "http-encoder", "HTTP data encoder"),
    DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_DECODER, "http-decoder", "HTTP data decoder"),
    DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_SERVER, "http-server", "HTTP server socket listening for incoming connections"),
    DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_STREAM, "http-stream", "HTTP request-response exchange"),
    DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "connection-manager", "HTTP connection manager"),
    DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_STREAM_MANAGER, "http2-stream-manager", "HTTP/2 stream manager"),
    DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_WEBSOCKET, "websocket", "Websocket"),
    DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_WEBSOCKET_SETUP, "websocket-setup", "Websocket setup"),
    DEFINE_LOG_SUBJECT_INFO(
        AWS_LS_HTTP_PROXY_NEGOTIATION,
        "proxy-negotiation",
        "Negotiating an http connection with a proxy server"),
};

/* List wrapper pairing the log-subject table with its element count. */
static struct aws_log_subject_info_list s_log_subject_list = {
    .subject_list = s_log_subject_infos,
    .count = AWS_ARRAY_SIZE(s_log_subject_infos),
};
+
/* Heap-allocated hash-table value: the enum integer a string key maps to,
 * plus the allocator that owns the struct so the table's value destructor
 * (s_destroy_enum_value) can release it. */
struct aws_enum_value {
    struct aws_allocator *allocator; /* allocator this struct was allocated from */
    int value;                       /* enum integer associated with the key */
};
+
+static void s_destroy_enum_value(void *value) {
+ struct aws_enum_value *enum_value = value;
+ aws_mem_release(enum_value->allocator, enum_value);
+}
+
+/**
+ * Given array of aws_byte_cursors, init hashtable where...
+ * Key is aws_byte_cursor* (pointing into cursor from array) and comparisons are case-insensitive.
+ * Value is the array index cast to a void*.
+ */
+static void s_init_str_to_enum_hash_table(
+ struct aws_hash_table *table,
+ struct aws_allocator *alloc,
+ struct aws_byte_cursor *str_array,
+ int start_index,
+ int end_index,
+ bool ignore_case) {
+
+ int err = aws_hash_table_init(
+ table,
+ alloc,
+ end_index - start_index,
+ ignore_case ? aws_hash_byte_cursor_ptr_ignore_case : aws_hash_byte_cursor_ptr,
+ (aws_hash_callback_eq_fn *)(ignore_case ? aws_byte_cursor_eq_ignore_case : aws_byte_cursor_eq),
+ NULL,
+ s_destroy_enum_value);
+ AWS_FATAL_ASSERT(!err);
+
+ for (int i = start_index; i < end_index; ++i) {
+ int was_created = 0;
+ struct aws_enum_value *enum_value = aws_mem_calloc(alloc, 1, sizeof(struct aws_enum_value));
+ AWS_FATAL_ASSERT(enum_value);
+ enum_value->allocator = alloc;
+ enum_value->value = i;
+
+ AWS_FATAL_ASSERT(str_array[i].ptr && "Missing enum string");
+ err = aws_hash_table_put(table, &str_array[i], (void *)enum_value, &was_created);
+ AWS_FATAL_ASSERT(!err && was_created);
+ }
+}
+
+/**
+ * Given key, get value from table initialized by s_init_str_to_enum_hash_table().
+ * Returns -1 if key not found.
+ */
+static int s_find_in_str_to_enum_hash_table(const struct aws_hash_table *table, struct aws_byte_cursor *key) {
+ struct aws_hash_element *elem;
+ aws_hash_table_find(table, key, &elem);
+ if (elem) {
+ struct aws_enum_value *enum_value = elem->value;
+ return enum_value->value;
+ }
+ return -1;
+}
+
/* METHODS */
/* Both tables are populated once by s_methods_init() and torn down by
 * s_methods_clean_up(). */
static struct aws_hash_table s_method_str_to_enum; /* for string -> enum lookup */
static struct aws_byte_cursor s_method_enum_to_str[AWS_HTTP_METHOD_COUNT]; /* for enum -> string lookup */
+
+static void s_methods_init(struct aws_allocator *alloc) {
+ s_method_enum_to_str[AWS_HTTP_METHOD_GET] = aws_http_method_get;
+ s_method_enum_to_str[AWS_HTTP_METHOD_HEAD] = aws_http_method_head;
+ s_method_enum_to_str[AWS_HTTP_METHOD_CONNECT] = aws_http_method_connect;
+
+ s_init_str_to_enum_hash_table(
+ &s_method_str_to_enum,
+ alloc,
+ s_method_enum_to_str,
+ AWS_HTTP_METHOD_UNKNOWN + 1,
+ AWS_HTTP_METHOD_COUNT,
+ false /* DO NOT ignore case of method */);
+}
+
/* Tear down the string -> enum method table built by s_methods_init().
 * (The enum -> string array is static storage and needs no cleanup.) */
static void s_methods_clean_up(void) {
    aws_hash_table_clean_up(&s_method_str_to_enum);
}
+
+enum aws_http_method aws_http_str_to_method(struct aws_byte_cursor cursor) {
+ int method = s_find_in_str_to_enum_hash_table(&s_method_str_to_enum, &cursor);
+ if (method >= 0) {
+ return (enum aws_http_method)method;
+ }
+ return AWS_HTTP_METHOD_UNKNOWN;
+}
+
+/* VERSIONS */
+static struct aws_byte_cursor s_version_enum_to_str[AWS_HTTP_HEADER_COUNT]; /* for enum -> string lookup */
+
+static void s_versions_init(struct aws_allocator *alloc) {
+ (void)alloc;
+ s_version_enum_to_str[AWS_HTTP_VERSION_UNKNOWN] = aws_byte_cursor_from_c_str("Unknown");
+ s_version_enum_to_str[AWS_HTTP_VERSION_1_0] = aws_byte_cursor_from_c_str("HTTP/1.0");
+ s_version_enum_to_str[AWS_HTTP_VERSION_1_1] = aws_byte_cursor_from_c_str("HTTP/1.1");
+ s_version_enum_to_str[AWS_HTTP_VERSION_2] = aws_byte_cursor_from_c_str("HTTP/2");
+}
+
+static void s_versions_clean_up(void) {}
+
+struct aws_byte_cursor aws_http_version_to_str(enum aws_http_version version) {
+ if ((int)version < AWS_HTTP_VERSION_UNKNOWN || (int)version >= AWS_HTTP_VERSION_COUNT) {
+ version = AWS_HTTP_VERSION_UNKNOWN;
+ }
+
+ return s_version_enum_to_str[version];
+}
+
/* HEADERS */
/* Populated by s_headers_init(). Two string -> enum tables exist: one that
 * ignores case, and one keyed by already-lowercased names that compares
 * case-sensitively. */
static struct aws_hash_table s_header_str_to_enum; /* for case-insensitive string -> enum lookup */
static struct aws_hash_table s_lowercase_header_str_to_enum; /* for case-sensitive string -> enum lookup */
static struct aws_byte_cursor s_header_enum_to_str[AWS_HTTP_HEADER_COUNT]; /* for enum -> string lookup */
+
+static void s_headers_init(struct aws_allocator *alloc) {
+ s_header_enum_to_str[AWS_HTTP_HEADER_METHOD] = aws_byte_cursor_from_c_str(":method");
+ s_header_enum_to_str[AWS_HTTP_HEADER_SCHEME] = aws_byte_cursor_from_c_str(":scheme");
+ s_header_enum_to_str[AWS_HTTP_HEADER_AUTHORITY] = aws_byte_cursor_from_c_str(":authority");
+ s_header_enum_to_str[AWS_HTTP_HEADER_PATH] = aws_byte_cursor_from_c_str(":path");
+ s_header_enum_to_str[AWS_HTTP_HEADER_STATUS] = aws_byte_cursor_from_c_str(":status");
+ s_header_enum_to_str[AWS_HTTP_HEADER_COOKIE] = aws_byte_cursor_from_c_str("cookie");
+ s_header_enum_to_str[AWS_HTTP_HEADER_SET_COOKIE] = aws_byte_cursor_from_c_str("set-cookie");
+ s_header_enum_to_str[AWS_HTTP_HEADER_HOST] = aws_byte_cursor_from_c_str("host");
+ s_header_enum_to_str[AWS_HTTP_HEADER_CONNECTION] = aws_byte_cursor_from_c_str("connection");
+ s_header_enum_to_str[AWS_HTTP_HEADER_CONTENT_LENGTH] = aws_byte_cursor_from_c_str("content-length");
+ s_header_enum_to_str[AWS_HTTP_HEADER_EXPECT] = aws_byte_cursor_from_c_str("expect");
+ s_header_enum_to_str[AWS_HTTP_HEADER_TRANSFER_ENCODING] = aws_byte_cursor_from_c_str("transfer-encoding");
+ s_header_enum_to_str[AWS_HTTP_HEADER_CACHE_CONTROL] = aws_byte_cursor_from_c_str("cache-control");
+ s_header_enum_to_str[AWS_HTTP_HEADER_MAX_FORWARDS] = aws_byte_cursor_from_c_str("max-forwards");
+ s_header_enum_to_str[AWS_HTTP_HEADER_PRAGMA] = aws_byte_cursor_from_c_str("pragma");
+ s_header_enum_to_str[AWS_HTTP_HEADER_RANGE] = aws_byte_cursor_from_c_str("range");
+ s_header_enum_to_str[AWS_HTTP_HEADER_TE] = aws_byte_cursor_from_c_str("te");
+ s_header_enum_to_str[AWS_HTTP_HEADER_CONTENT_ENCODING] = aws_byte_cursor_from_c_str("content-encoding");
+ s_header_enum_to_str[AWS_HTTP_HEADER_CONTENT_TYPE] = aws_byte_cursor_from_c_str("content-type");
+ s_header_enum_to_str[AWS_HTTP_HEADER_CONTENT_RANGE] = aws_byte_cursor_from_c_str("content-range");
+ s_header_enum_to_str[AWS_HTTP_HEADER_TRAILER] = aws_byte_cursor_from_c_str("trailer");
+ s_header_enum_to_str[AWS_HTTP_HEADER_WWW_AUTHENTICATE] = aws_byte_cursor_from_c_str("www-authenticate");
+ s_header_enum_to_str[AWS_HTTP_HEADER_AUTHORIZATION] = aws_byte_cursor_from_c_str("authorization");
+ s_header_enum_to_str[AWS_HTTP_HEADER_PROXY_AUTHENTICATE] = aws_byte_cursor_from_c_str("proxy-authenticate");
+ s_header_enum_to_str[AWS_HTTP_HEADER_PROXY_AUTHORIZATION] = aws_byte_cursor_from_c_str("proxy-authorization");
+ s_header_enum_to_str[AWS_HTTP_HEADER_AGE] = aws_byte_cursor_from_c_str("age");
+ s_header_enum_to_str[AWS_HTTP_HEADER_EXPIRES] = aws_byte_cursor_from_c_str("expires");
+ s_header_enum_to_str[AWS_HTTP_HEADER_DATE] = aws_byte_cursor_from_c_str("date");
+ s_header_enum_to_str[AWS_HTTP_HEADER_LOCATION] = aws_byte_cursor_from_c_str("location");
+ s_header_enum_to_str[AWS_HTTP_HEADER_RETRY_AFTER] = aws_byte_cursor_from_c_str("retry-after");
+ s_header_enum_to_str[AWS_HTTP_HEADER_VARY] = aws_byte_cursor_from_c_str("vary");
+ s_header_enum_to_str[AWS_HTTP_HEADER_WARNING] = aws_byte_cursor_from_c_str("warning");
+ s_header_enum_to_str[AWS_HTTP_HEADER_UPGRADE] = aws_byte_cursor_from_c_str("upgrade");
+ s_header_enum_to_str[AWS_HTTP_HEADER_KEEP_ALIVE] = aws_byte_cursor_from_c_str("keep-alive");
+ s_header_enum_to_str[AWS_HTTP_HEADER_PROXY_CONNECTION] = aws_byte_cursor_from_c_str("proxy-connection");
+
+ s_init_str_to_enum_hash_table(
+ &s_header_str_to_enum,
+ alloc,
+ s_header_enum_to_str,
+ AWS_HTTP_HEADER_UNKNOWN + 1,
+ AWS_HTTP_HEADER_COUNT,
+ true /* ignore case */);
+
+ s_init_str_to_enum_hash_table(
+ &s_lowercase_header_str_to_enum,
+ alloc,
+ s_header_enum_to_str,
+ AWS_HTTP_HEADER_UNKNOWN + 1,
+ AWS_HTTP_HEADER_COUNT,
+ false /* ignore case */);
+}
+
+/* Release both hash tables built by s_headers_init(). */
+static void s_headers_clean_up(void) {
+    aws_hash_table_clean_up(&s_header_str_to_enum);
+    aws_hash_table_clean_up(&s_lowercase_header_str_to_enum);
+}
+
+/* Case-insensitive lookup of a header name (e.g. "Content-Length").
+ * Returns AWS_HTTP_HEADER_UNKNOWN when the name is not a registered header. */
+enum aws_http_header_name aws_http_str_to_header_name(struct aws_byte_cursor cursor) {
+    int header = s_find_in_str_to_enum_hash_table(&s_header_str_to_enum, &cursor);
+    if (header >= 0) {
+        return (enum aws_http_header_name)header;
+    }
+    return AWS_HTTP_HEADER_UNKNOWN;
+}
+
+/* Case-sensitive lookup of an already-lowercased header name (HTTP/2 wire form).
+ * Faster than the case-insensitive variant; returns AWS_HTTP_HEADER_UNKNOWN on no match. */
+enum aws_http_header_name aws_http_lowercase_str_to_header_name(struct aws_byte_cursor cursor) {
+    int header = s_find_in_str_to_enum_hash_table(&s_lowercase_header_str_to_enum, &cursor);
+    if (header >= 0) {
+        return (enum aws_http_header_name)header;
+    }
+    return AWS_HTTP_HEADER_UNKNOWN;
+}
+
+/* STATUS */
+/* Return the canonical reason phrase for a status code, or "" for codes not in the
+ * IANA registry. The returned string is a static literal; callers must not free it. */
+const char *aws_http_status_text(int status_code) {
+    /**
+     * Data from Internet Assigned Numbers Authority (IANA):
+     * https://www.iana.org/assignments/http-status-codes/http-status-codes.txt
+     */
+    switch (status_code) {
+        case AWS_HTTP_STATUS_CODE_100_CONTINUE:
+            return "Continue";
+        case AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS:
+            return "Switching Protocols";
+        case AWS_HTTP_STATUS_CODE_102_PROCESSING:
+            return "Processing";
+        case AWS_HTTP_STATUS_CODE_103_EARLY_HINTS:
+            return "Early Hints";
+        case AWS_HTTP_STATUS_CODE_200_OK:
+            return "OK";
+        case AWS_HTTP_STATUS_CODE_201_CREATED:
+            return "Created";
+        case AWS_HTTP_STATUS_CODE_202_ACCEPTED:
+            return "Accepted";
+        case AWS_HTTP_STATUS_CODE_203_NON_AUTHORITATIVE_INFORMATION:
+            return "Non-Authoritative Information";
+        case AWS_HTTP_STATUS_CODE_204_NO_CONTENT:
+            return "No Content";
+        case AWS_HTTP_STATUS_CODE_205_RESET_CONTENT:
+            return "Reset Content";
+        case AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT:
+            return "Partial Content";
+        case AWS_HTTP_STATUS_CODE_207_MULTI_STATUS:
+            return "Multi-Status";
+        case AWS_HTTP_STATUS_CODE_208_ALREADY_REPORTED:
+            return "Already Reported";
+        case AWS_HTTP_STATUS_CODE_226_IM_USED:
+            return "IM Used";
+        case AWS_HTTP_STATUS_CODE_300_MULTIPLE_CHOICES:
+            return "Multiple Choices";
+        case AWS_HTTP_STATUS_CODE_301_MOVED_PERMANENTLY:
+            return "Moved Permanently";
+        case AWS_HTTP_STATUS_CODE_302_FOUND:
+            return "Found";
+        case AWS_HTTP_STATUS_CODE_303_SEE_OTHER:
+            return "See Other";
+        case AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED:
+            return "Not Modified";
+        case AWS_HTTP_STATUS_CODE_305_USE_PROXY:
+            return "Use Proxy";
+        case AWS_HTTP_STATUS_CODE_307_TEMPORARY_REDIRECT:
+            return "Temporary Redirect";
+        case AWS_HTTP_STATUS_CODE_308_PERMANENT_REDIRECT:
+            return "Permanent Redirect";
+        case AWS_HTTP_STATUS_CODE_400_BAD_REQUEST:
+            return "Bad Request";
+        case AWS_HTTP_STATUS_CODE_401_UNAUTHORIZED:
+            return "Unauthorized";
+        case AWS_HTTP_STATUS_CODE_402_PAYMENT_REQUIRED:
+            return "Payment Required";
+        case AWS_HTTP_STATUS_CODE_403_FORBIDDEN:
+            return "Forbidden";
+        case AWS_HTTP_STATUS_CODE_404_NOT_FOUND:
+            return "Not Found";
+        case AWS_HTTP_STATUS_CODE_405_METHOD_NOT_ALLOWED:
+            return "Method Not Allowed";
+        case AWS_HTTP_STATUS_CODE_406_NOT_ACCEPTABLE:
+            return "Not Acceptable";
+        case AWS_HTTP_STATUS_CODE_407_PROXY_AUTHENTICATION_REQUIRED:
+            return "Proxy Authentication Required";
+        case AWS_HTTP_STATUS_CODE_408_REQUEST_TIMEOUT:
+            return "Request Timeout";
+        case AWS_HTTP_STATUS_CODE_409_CONFLICT:
+            return "Conflict";
+        case AWS_HTTP_STATUS_CODE_410_GONE:
+            return "Gone";
+        case AWS_HTTP_STATUS_CODE_411_LENGTH_REQUIRED:
+            return "Length Required";
+        case AWS_HTTP_STATUS_CODE_412_PRECONDITION_FAILED:
+            return "Precondition Failed";
+        case AWS_HTTP_STATUS_CODE_413_REQUEST_ENTITY_TOO_LARGE:
+            return "Payload Too Large";
+        case AWS_HTTP_STATUS_CODE_414_REQUEST_URI_TOO_LONG:
+            return "URI Too Long";
+        case AWS_HTTP_STATUS_CODE_415_UNSUPPORTED_MEDIA_TYPE:
+            return "Unsupported Media Type";
+        case AWS_HTTP_STATUS_CODE_416_REQUESTED_RANGE_NOT_SATISFIABLE:
+            return "Range Not Satisfiable";
+        case AWS_HTTP_STATUS_CODE_417_EXPECTATION_FAILED:
+            return "Expectation Failed";
+        case AWS_HTTP_STATUS_CODE_421_MISDIRECTED_REQUEST:
+            return "Misdirected Request";
+        case AWS_HTTP_STATUS_CODE_422_UNPROCESSABLE_ENTITY:
+            return "Unprocessable Entity";
+        case AWS_HTTP_STATUS_CODE_423_LOCKED:
+            return "Locked";
+        case AWS_HTTP_STATUS_CODE_424_FAILED_DEPENDENCY:
+            return "Failed Dependency";
+        case AWS_HTTP_STATUS_CODE_425_TOO_EARLY:
+            return "Too Early";
+        case AWS_HTTP_STATUS_CODE_426_UPGRADE_REQUIRED:
+            return "Upgrade Required";
+        case AWS_HTTP_STATUS_CODE_428_PRECONDITION_REQUIRED:
+            return "Precondition Required";
+        case AWS_HTTP_STATUS_CODE_429_TOO_MANY_REQUESTS:
+            return "Too Many Requests";
+        case AWS_HTTP_STATUS_CODE_431_REQUEST_HEADER_FIELDS_TOO_LARGE:
+            return "Request Header Fields Too Large";
+        case AWS_HTTP_STATUS_CODE_451_UNAVAILABLE_FOR_LEGAL_REASON:
+            return "Unavailable For Legal Reasons";
+        case AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR:
+            return "Internal Server Error";
+        case AWS_HTTP_STATUS_CODE_501_NOT_IMPLEMENTED:
+            return "Not Implemented";
+        case AWS_HTTP_STATUS_CODE_502_BAD_GATEWAY:
+            return "Bad Gateway";
+        case AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE:
+            return "Service Unavailable";
+        case AWS_HTTP_STATUS_CODE_504_GATEWAY_TIMEOUT:
+            return "Gateway Timeout";
+        case AWS_HTTP_STATUS_CODE_505_HTTP_VERSION_NOT_SUPPORTED:
+            return "HTTP Version Not Supported";
+        case AWS_HTTP_STATUS_CODE_506_VARIANT_ALSO_NEGOTIATES:
+            return "Variant Also Negotiates";
+        case AWS_HTTP_STATUS_CODE_507_INSUFFICIENT_STORAGE:
+            return "Insufficient Storage";
+        case AWS_HTTP_STATUS_CODE_508_LOOP_DETECTED:
+            return "Loop Detected";
+        case AWS_HTTP_STATUS_CODE_510_NOT_EXTENDED:
+            return "Not Extended";
+        case AWS_HTTP_STATUS_CODE_511_NETWORK_AUTHENTICATION_REQUIRED:
+            return "Network Authentication Required";
+        default:
+            return "";
+    }
+}
+
+static bool s_library_initialized = false;
+
+/* One-time library initialization: dependent libraries, error/log registries, and
+ * the lookup tables above. Idempotent via s_library_initialized.
+ * NOTE(review): the flag is a plain bool, not atomic — presumably init is expected
+ * to happen on a single thread before any other API use; confirm with callers. */
+void aws_http_library_init(struct aws_allocator *alloc) {
+    if (s_library_initialized) {
+        return;
+    }
+    s_library_initialized = true;
+
+    aws_io_library_init(alloc);
+    aws_compression_library_init(alloc);
+    aws_register_error_info(&s_error_list);
+    aws_register_log_subject_info_list(&s_log_subject_list);
+    s_methods_init(alloc);
+    s_headers_init(alloc);
+    s_versions_init(alloc);
+    aws_hpack_static_table_init(alloc);
+}
+
+/* Tear down everything aws_http_library_init() set up. Joins all managed threads
+ * first so no background work touches the tables while they are destroyed;
+ * dependent libraries are cleaned up last, mirroring init order. Idempotent. */
+void aws_http_library_clean_up(void) {
+    if (!s_library_initialized) {
+        return;
+    }
+    s_library_initialized = false;
+
+    aws_thread_join_all_managed();
+    aws_unregister_error_info(&s_error_list);
+    aws_unregister_log_subject_info_list(&s_log_subject_list);
+    s_methods_clean_up();
+    s_headers_clean_up();
+    s_versions_clean_up();
+    aws_hpack_static_table_clean_up();
+    aws_compression_library_clean_up();
+    aws_io_library_clean_up();
+}
+
+/* Log and fatally assert if aws_http_library_init() has not been called.
+ * Fixed the definition to use (void): in C (pre-C23), empty parentheses declare a
+ * function with unspecified parameters rather than a function taking no arguments. */
+void aws_http_fatal_assert_library_initialized(void) {
+    if (!s_library_initialized) {
+        AWS_LOGF_FATAL(
+            AWS_LS_HTTP_GENERAL,
+            "aws_http_library_init() must be called before using any functionality in aws-c-http.");
+
+        AWS_FATAL_ASSERT(s_library_initialized);
+    }
+}
+
+/* Canonical byte cursors for the well-known HTTP method strings. */
+const struct aws_byte_cursor aws_http_method_get = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET");
+const struct aws_byte_cursor aws_http_method_head = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HEAD");
+const struct aws_byte_cursor aws_http_method_post = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST");
+const struct aws_byte_cursor aws_http_method_put = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PUT");
+const struct aws_byte_cursor aws_http_method_delete = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("DELETE");
+const struct aws_byte_cursor aws_http_method_connect = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CONNECT");
+const struct aws_byte_cursor aws_http_method_options = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("OPTIONS");
+
+/* HTTP/2 pseudo-header names. */
+const struct aws_byte_cursor aws_http_header_method = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":method");
+const struct aws_byte_cursor aws_http_header_scheme = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":scheme");
+const struct aws_byte_cursor aws_http_header_authority = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":authority");
+const struct aws_byte_cursor aws_http_header_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":path");
+const struct aws_byte_cursor aws_http_header_status = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":status");
+
+/* URI scheme strings. */
+const struct aws_byte_cursor aws_http_scheme_http = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http");
+const struct aws_byte_cursor aws_http_scheme_https = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https");
diff --git a/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c b/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
new file mode 100644
index 00000000000..fb23199376c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
@@ -0,0 +1,1238 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/array_list.h>
+#include <aws/common/clock.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/logging.h>
+#include <aws/http/connection.h>
+#include <aws/http/connection_manager.h>
+#include <aws/http/request_response.h>
+#include <aws/io/channel.h>
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/event_loop.h>
+
+#include <aws/http/http2_stream_manager.h>
+#include <aws/http/private/http2_stream_manager_impl.h>
+#include <aws/http/private/request_response_impl.h>
+#include <aws/http/status_code.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+/* Apple toolchains such as xcode and swiftpm define the DEBUG symbol. undef it here so we can actually use the token */
+#undef DEBUG
+
+/* Per-instance logging helpers: prefix every message with the stream manager's address. */
+#define STREAM_MANAGER_LOGF(level, stream_manager, text, ...)                                                          \
+    AWS_LOGF_##level(AWS_LS_HTTP_STREAM_MANAGER, "id=%p: " text, (void *)(stream_manager), __VA_ARGS__)
+#define STREAM_MANAGER_LOG(level, stream_manager, text) STREAM_MANAGER_LOGF(level, stream_manager, "%s", text)
+
+/* Default PING-ACK timeout: 3 seconds, expressed in milliseconds. */
+static const size_t s_default_ping_timeout_ms = 3000;
+
+/* Forward declarations for the destroy/transaction machinery defined below. */
+static void s_stream_manager_start_destroy(struct aws_http2_stream_manager *stream_manager);
+static void s_aws_http2_stream_manager_build_transaction_synced(struct aws_http2_stream_management_transaction *work);
+static void s_aws_http2_stream_manager_execute_transaction(struct aws_http2_stream_management_transaction *work);
+
+/* Allocate a pending stream acquisition that records the caller's request options
+ * and completion callback until a connection can be assigned.
+ * Acquires a reference on the request message; released by the matching destroy. */
+static struct aws_h2_sm_pending_stream_acquisition *s_new_pending_stream_acquisition(
+    struct aws_allocator *allocator,
+    const struct aws_http_make_request_options *options,
+    aws_http2_stream_manager_on_stream_acquired_fn *callback,
+    void *user_data) {
+    struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition =
+        aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_sm_pending_stream_acquisition));
+
+    /* Copy the options and keep the underlying message alive */
+    pending_stream_acquisition->options = *options;
+    pending_stream_acquisition->request = options->request;
+    aws_http_message_acquire(pending_stream_acquisition->request);
+    pending_stream_acquisition->callback = callback;
+    pending_stream_acquisition->user_data = user_data;
+    pending_stream_acquisition->allocator = allocator;
+    return pending_stream_acquisition;
+}
+
+/* NULL-tolerant destructor: drops the reference on the request message (if any)
+ * and frees the acquisition itself with the allocator it was created from. */
+static void s_pending_stream_acquisition_destroy(
+    struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition) {
+    if (pending_stream_acquisition == NULL) {
+        return;
+    }
+    if (pending_stream_acquisition->request) {
+        aws_http_message_release(pending_stream_acquisition->request);
+    }
+    aws_mem_release(pending_stream_acquisition->allocator, pending_stream_acquisition);
+}
+
+/* Acquire the synced_data mutex; failure is a programming error, so assert-only. */
+static void s_lock_synced_data(struct aws_http2_stream_manager *stream_manager) {
+    int err = aws_mutex_lock(&stream_manager->synced_data.lock);
+    AWS_ASSERT(!err && "lock failed");
+    (void)err;
+}
+
+/* Release the synced_data mutex; failure is a programming error, so assert-only. */
+static void s_unlock_synced_data(struct aws_http2_stream_manager *stream_manager) {
+    int err = aws_mutex_unlock(&stream_manager->synced_data.lock);
+    AWS_ASSERT(!err && "unlock failed");
+    (void)err;
+}
+
+/* TRACE-log the internal refcount counters. Reads synced_data, so must be called
+ * with the lock held (per the *_synced naming convention in this file). */
+static void s_sm_log_stats_synced(struct aws_http2_stream_manager *stream_manager) {
+    STREAM_MANAGER_LOGF(
+        TRACE,
+        stream_manager,
+        "Stream manager internal counts status: "
+        "connection acquiring=%zu, streams opening=%zu, pending make request count=%zu, pending acquisition count=%zu, "
+        "holding connections count=%zu",
+        stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING],
+        stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_OPEN_STREAM],
+        stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_MAKE_REQUESTS],
+        stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION],
+        stream_manager->synced_data.holding_connections_count);
+}
+
+/* The count acquire and release all need to be invoked while holding the lock */
+/* Bump the per-type counter and take one internal reference per unit counted,
+ * so the manager stays alive while any tracked work is outstanding. */
+static void s_sm_count_increase_synced(
+    struct aws_http2_stream_manager *stream_manager,
+    enum aws_sm_count_type count_type,
+    size_t num) {
+    stream_manager->synced_data.internal_refcount_stats[count_type] += num;
+    for (size_t i = 0; i < num; i++) {
+        aws_ref_count_acquire(&stream_manager->internal_ref_count);
+    }
+}
+
+/* Inverse of s_sm_count_increase_synced(): drop the counter and release the
+ * matching internal references. Call only with the lock held. */
+static void s_sm_count_decrease_synced(
+    struct aws_http2_stream_manager *stream_manager,
+    enum aws_sm_count_type count_type,
+    size_t num) {
+    stream_manager->synced_data.internal_refcount_stats[count_type] -= num;
+    for (size_t i = 0; i < num; i++) {
+        aws_ref_count_release(&stream_manager->internal_ref_count);
+    }
+}
+
+/* Zero a stack transaction and take an internal reference on the manager so it
+ * outlives the transaction (released by the matching clean_up). */
+static void s_aws_stream_management_transaction_init(
+    struct aws_http2_stream_management_transaction *work,
+    struct aws_http2_stream_manager *stream_manager) {
+    AWS_ZERO_STRUCT(*work);
+    aws_linked_list_init(&work->pending_make_requests);
+    work->stream_manager = stream_manager;
+    work->allocator = stream_manager->allocator;
+    aws_ref_count_acquire(&stream_manager->internal_ref_count);
+}
+
+/* Release the transaction's internal reference. The pending_make_requests list
+ * must already be drained (asserted). */
+static void s_aws_stream_management_transaction_clean_up(struct aws_http2_stream_management_transaction *work) {
+    (void)work;
+    AWS_ASSERT(aws_linked_list_empty(&work->pending_make_requests));
+    aws_ref_count_release(&work->stream_manager->internal_ref_count);
+}
+
+/* Pick a connection via the "best of two" strategy: sample two random members of
+ * the set and take the one with fewer streams assigned. Returns NULL if either
+ * random lookup fails.
+ * Fixes from review: the original dereferenced the sampled pointers BEFORE checking
+ * the error flag (NULL dereference if a lookup failed, e.g. on an empty set), and
+ * carried an unreachable `(void)errored;` after the return. */
+static struct aws_h2_sm_connection *s_get_best_sm_connection_from_set(struct aws_random_access_set *set) {
+    /* Use the best two algorithm */
+    int errored = AWS_ERROR_SUCCESS;
+    struct aws_h2_sm_connection *sm_connection_a = NULL;
+    errored = aws_random_access_set_random_get_ptr(set, (void **)&sm_connection_a);
+    struct aws_h2_sm_connection *sm_connection_b = NULL;
+    errored |= aws_random_access_set_random_get_ptr(set, (void **)&sm_connection_b);
+    /* Check for failure before touching the pointers: on error they may be NULL. */
+    if (errored != AWS_ERROR_SUCCESS) {
+        return NULL;
+    }
+    return sm_connection_a->num_streams_assigned > sm_connection_b->num_streams_assigned ? sm_connection_b
+                                                                                        : sm_connection_a;
+}
+
+/* helper function for building the transaction: Try to assign connection for a pending stream acquisition */
+/* *_synced should only be called with LOCK HELD or from another synced function.
+ * On success sets pending_stream_acquisition->sm_connection and bumps that
+ * connection's assigned-stream count; on failure leaves sm_connection NULL.
+ * Also migrates the chosen connection between the ideal/nonideal/full states
+ * based on its new stream count. */
+static void s_sm_try_assign_connection_to_pending_stream_acquisition_synced(
+    struct aws_http2_stream_manager *stream_manager,
+    struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition) {
+
+    AWS_ASSERT(pending_stream_acquisition->sm_connection == NULL);
+    int errored = 0;
+    if (aws_random_access_set_get_size(&stream_manager->synced_data.ideal_available_set)) {
+        /**
+         * Try assigning to connection from ideal set
+         */
+        struct aws_h2_sm_connection *chosen_connection =
+            s_get_best_sm_connection_from_set(&stream_manager->synced_data.ideal_available_set);
+        AWS_ASSERT(chosen_connection);
+        pending_stream_acquisition->sm_connection = chosen_connection;
+        chosen_connection->num_streams_assigned++;
+
+        STREAM_MANAGER_LOGF(
+            DEBUG,
+            stream_manager,
+            "Picking connection:%p for acquisition:%p. Streams assigned to the connection=%" PRIu32 "",
+            (void *)chosen_connection->connection,
+            (void *)pending_stream_acquisition,
+            chosen_connection->num_streams_assigned);
+        /* Check if connection is still available or ideal, and move it if it's not */
+        if (chosen_connection->num_streams_assigned >= chosen_connection->max_concurrent_streams) {
+            /* It becomes not available for new streams any more, remove it from the set, but still alive (streams
+             * created will track the lifetime) */
+            chosen_connection->state = AWS_H2SMCST_FULL;
+            errored |=
+                aws_random_access_set_remove(&stream_manager->synced_data.ideal_available_set, chosen_connection);
+            STREAM_MANAGER_LOGF(
+                DEBUG,
+                stream_manager,
+                "connection:%p reaches max concurrent streams limits. "
+                "Connection max limits=%" PRIu32 ". Moving it out of available connections.",
+                (void *)chosen_connection->connection,
+                chosen_connection->max_concurrent_streams);
+        } else if (chosen_connection->num_streams_assigned >= stream_manager->ideal_concurrent_streams_per_connection) {
+            /* It meets the ideal limit, but is still available for new streams; move it to the nonideal-available set */
+            errored |=
+                aws_random_access_set_remove(&stream_manager->synced_data.ideal_available_set, chosen_connection);
+            bool added = false;
+            errored |= aws_random_access_set_add(
+                &stream_manager->synced_data.nonideal_available_set, chosen_connection, &added);
+            errored |= !added;
+            chosen_connection->state = AWS_H2SMCST_NEARLY_FULL;
+            STREAM_MANAGER_LOGF(
+                DEBUG,
+                stream_manager,
+                "connection:%p reaches ideal concurrent streams limits. Ideal limits=%zu. Moving it to nonideal set.",
+                (void *)chosen_connection->connection,
+                stream_manager->ideal_concurrent_streams_per_connection);
+        }
+    } else if (stream_manager->synced_data.holding_connections_count == stream_manager->max_connections) {
+        /**
+         * Try assigning to connection from nonideal available set.
+         *
+         * Note that we do not assign to nonideal connections until we're holding all the connections we can ever
+         * possibly get. This way, we don't overfill the first connections we get our hands on.
+         */
+
+        if (aws_random_access_set_get_size(&stream_manager->synced_data.nonideal_available_set)) {
+            struct aws_h2_sm_connection *chosen_connection =
+                s_get_best_sm_connection_from_set(&stream_manager->synced_data.nonideal_available_set);
+            AWS_ASSERT(chosen_connection);
+            pending_stream_acquisition->sm_connection = chosen_connection;
+            chosen_connection->num_streams_assigned++;
+
+            STREAM_MANAGER_LOGF(
+                DEBUG,
+                stream_manager,
+                "Picking connection:%p for acquisition:%p. Streams assigned to the connection=%" PRIu32 "",
+                (void *)chosen_connection->connection,
+                (void *)pending_stream_acquisition,
+                chosen_connection->num_streams_assigned);
+
+            if (chosen_connection->num_streams_assigned >= chosen_connection->max_concurrent_streams) {
+                /* It becomes not available for new streams any more, remove it from the set, but still alive (streams
+                 * created will track the lifetime) */
+                chosen_connection->state = AWS_H2SMCST_FULL;
+                errored |= aws_random_access_set_remove(
+                    &stream_manager->synced_data.nonideal_available_set, chosen_connection);
+                STREAM_MANAGER_LOGF(
+                    DEBUG,
+                    stream_manager,
+                    "connection %p reaches max concurrent streams limits. "
+                    "Connection max limits=%" PRIu32 ". Moving it out of available connections.",
+                    (void *)chosen_connection->connection,
+                    chosen_connection->max_concurrent_streams);
+            }
+        }
+    }
+    AWS_ASSERT(errored == 0 && "random access set went wrong");
+    (void)errored;
+}
+
+/* NOTE: never invoke with lock held */
+/* Fail every acquisition in the list: invoke its callback with a NULL stream and
+ * the given error code, then destroy it. The list is drained completely. */
+static void s_finish_pending_stream_acquisitions_list_helper(
+    struct aws_http2_stream_manager *stream_manager,
+    struct aws_linked_list *pending_stream_acquisitions,
+    int error_code) {
+    while (!aws_linked_list_empty(pending_stream_acquisitions)) {
+        struct aws_linked_list_node *node = aws_linked_list_pop_front(pending_stream_acquisitions);
+        struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition =
+            AWS_CONTAINER_OF(node, struct aws_h2_sm_pending_stream_acquisition, node);
+        /* Make sure no connection assigned. */
+        AWS_ASSERT(pending_stream_acquisition->sm_connection == NULL);
+        if (pending_stream_acquisition->callback) {
+            pending_stream_acquisition->callback(NULL, error_code, pending_stream_acquisition->user_data);
+        }
+        STREAM_MANAGER_LOGF(
+            DEBUG,
+            stream_manager,
+            "acquisition:%p failed with error: %d(%s)",
+            (void *)pending_stream_acquisition,
+            error_code,
+            aws_error_str(error_code));
+        s_pending_stream_acquisition_destroy(pending_stream_acquisition);
+    }
+}
+
+/* This is scheduled to run on a separate event loop to finish pending acquisition asynchronously */
+/* Runs only while the manager is in the DESTROYING state (asserted). Swaps the
+ * pending list out under the lock, then fails each acquisition outside the lock
+ * with SHUTTING_DOWN, frees the heap-allocated task, and executes the transaction. */
+static void s_finish_pending_stream_acquisitions_task(struct aws_task *task, void *arg, enum aws_task_status status) {
+    (void)status;
+    struct aws_http2_stream_manager *stream_manager = arg;
+    STREAM_MANAGER_LOG(TRACE, stream_manager, "Stream Manager final task runs");
+    struct aws_http2_stream_management_transaction work;
+    struct aws_linked_list pending_stream_acquisitions;
+    aws_linked_list_init(&pending_stream_acquisitions);
+    s_aws_stream_management_transaction_init(&work, stream_manager);
+    { /* BEGIN CRITICAL SECTION */
+        s_lock_synced_data(stream_manager);
+        AWS_ASSERT(stream_manager->synced_data.state == AWS_H2SMST_DESTROYING);
+        /* swap list to avoid callback with lock held. */
+        aws_linked_list_swap_contents(
+            &pending_stream_acquisitions, &stream_manager->synced_data.pending_stream_acquisitions);
+        /* After the callbacks invoked, now we can update the count */
+        s_sm_count_decrease_synced(
+            stream_manager,
+            AWS_SMCT_PENDING_ACQUISITION,
+            stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION]);
+        s_aws_http2_stream_manager_build_transaction_synced(&work);
+        s_unlock_synced_data(stream_manager);
+    } /* END CRITICAL SECTION */
+    s_finish_pending_stream_acquisitions_list_helper(
+        stream_manager, &pending_stream_acquisitions, AWS_ERROR_HTTP_STREAM_MANAGER_SHUTTING_DOWN);
+    aws_mem_release(stream_manager->allocator, task);
+    s_aws_http2_stream_manager_execute_transaction(&work);
+}
+
+/* helper function for building the transaction: how many new connections we should request */
+/* Computes work->new_connections = min(connections still allowed,
+ * ceil(pending acquisitions / ideal streams per connection) - already acquiring),
+ * and takes the matching CONNECTIONS_ACQUIRING counts. Call with lock held. */
+static void s_check_new_connections_needed_synced(struct aws_http2_stream_management_transaction *work) {
+    struct aws_http2_stream_manager *stream_manager = work->stream_manager;
+    /* The ideal new connection we need to fit all the pending stream acquisitions */
+    size_t ideal_new_connection_count =
+        stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] /
+        stream_manager->ideal_concurrent_streams_per_connection;
+    /* Rounding up */
+    if (stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] %
+        stream_manager->ideal_concurrent_streams_per_connection) {
+        ++ideal_new_connection_count;
+    }
+    /* Subtract the number of connections already being acquired, to avoid double-requesting */
+    work->new_connections = aws_sub_size_saturating(
+        ideal_new_connection_count,
+        stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING]);
+    /* The real number we can have is the min of how many more we can still have and how many we need */
+    size_t new_connections_available =
+        stream_manager->max_connections - stream_manager->synced_data.holding_connections_count -
+        stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING];
+    work->new_connections = aws_min_size(new_connections_available, work->new_connections);
+    /* Update the number of connections we are acquiring */
+    s_sm_count_increase_synced(stream_manager, AWS_SMCT_CONNECTIONS_ACQUIRING, work->new_connections);
+    STREAM_MANAGER_LOGF(
+        DEBUG,
+        stream_manager,
+        "number of acquisition that waiting for connections to use=%zu. connection acquiring=%zu, connection held=%zu, "
+        "max connection=%zu",
+        stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION],
+        stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING],
+        stream_manager->synced_data.holding_connections_count,
+        stream_manager->max_connections);
+}
+
+/**
+ * It can be invoked from:
+ * - User release last refcount of stream manager
+ * - User acquires stream from stream manager
+ * - Connection acquired callback from connection manager
+ * - Stream completed callback from HTTP
+ */
+/* *_synced should only be called with LOCK HELD or from another synced function.
+ * READY state: moves as many pending acquisitions as possible onto connections
+ * (into work->pending_make_requests) and decides how many new connections to
+ * request. Otherwise (shutting down): schedules the one-shot finish task that
+ * fails the remaining acquisitions on an event loop. */
+static void s_aws_http2_stream_manager_build_transaction_synced(struct aws_http2_stream_management_transaction *work) {
+    struct aws_http2_stream_manager *stream_manager = work->stream_manager;
+    if (stream_manager->synced_data.state == AWS_H2SMST_READY) {
+
+        /* Step 1: Pending acquisitions of stream */
+        while (!aws_linked_list_empty(&stream_manager->synced_data.pending_stream_acquisitions)) {
+            struct aws_linked_list_node *node =
+                aws_linked_list_pop_front(&stream_manager->synced_data.pending_stream_acquisitions);
+            struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition =
+                AWS_CONTAINER_OF(node, struct aws_h2_sm_pending_stream_acquisition, node);
+            s_sm_try_assign_connection_to_pending_stream_acquisition_synced(stream_manager, pending_stream_acquisition);
+            if (pending_stream_acquisition->sm_connection == NULL) {
+                /* Cannot find any connection, push it back to the front and break the loop */
+                aws_linked_list_push_front(&stream_manager->synced_data.pending_stream_acquisitions, node);
+                STREAM_MANAGER_LOGF(
+                    DEBUG,
+                    stream_manager,
+                    "acquisition:%p cannot find any connection to use.",
+                    (void *)pending_stream_acquisition);
+                break;
+            } else {
+                /* found connection for the request. Move it to pending make requests and update the count */
+                aws_linked_list_push_back(&work->pending_make_requests, node);
+                s_sm_count_decrease_synced(stream_manager, AWS_SMCT_PENDING_ACQUISITION, 1);
+                s_sm_count_increase_synced(stream_manager, AWS_SMCT_PENDING_MAKE_REQUESTS, 1);
+            }
+        }
+
+        /* Step 2: Check for new connections needed */
+        if (stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION]) {
+            s_check_new_connections_needed_synced(work);
+        }
+
+    } else {
+        /* Stream manager is shutting down */
+        if (stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] &&
+            !stream_manager->synced_data.finish_pending_stream_acquisitions_task_scheduled) {
+            /* schedule a task to finish the pending acquisitions if one is needed and not already scheduled */
+            stream_manager->finish_pending_stream_acquisitions_task_event_loop =
+                aws_event_loop_group_get_next_loop(stream_manager->bootstrap->event_loop_group);
+            struct aws_task *finish_pending_stream_acquisitions_task =
+                aws_mem_calloc(stream_manager->allocator, 1, sizeof(struct aws_task));
+            aws_task_init(
+                finish_pending_stream_acquisitions_task,
+                s_finish_pending_stream_acquisitions_task,
+                stream_manager,
+                "sm_finish_pending_stream_acquisitions");
+            aws_event_loop_schedule_task_now(
+                stream_manager->finish_pending_stream_acquisitions_task_event_loop,
+                finish_pending_stream_acquisitions_task);
+            stream_manager->synced_data.finish_pending_stream_acquisitions_task_scheduled = true;
+        }
+    }
+    s_sm_log_stats_synced(stream_manager);
+}
+
+/* PING-ACK callback: on success, marks ping_received so the timeout task does not
+ * close the connection. Always releases the refcount taken when the PING was sent. */
+static void s_on_ping_complete(
+    struct aws_http_connection *http2_connection,
+    uint64_t round_trip_time_ns,
+    int error_code,
+    void *user_data) {
+
+    (void)http2_connection;
+    struct aws_h2_sm_connection *sm_connection = user_data;
+    if (error_code) {
+        goto done;
+    }
+    if (!sm_connection->connection) {
+        goto done;
+    }
+    AWS_ASSERT(aws_channel_thread_is_callers_thread(aws_http_connection_get_channel(sm_connection->connection)));
+    STREAM_MANAGER_LOGF(
+        TRACE,
+        sm_connection->stream_manager,
+        "PING ACK received for connection: %p. Round trip time in ns is: %" PRIu64 ".",
+        (void *)sm_connection->connection,
+        round_trip_time_ns);
+    sm_connection->thread_data.ping_received = true;
+
+done:
+    /* Release refcount held for ping complete */
+    aws_ref_count_release(&sm_connection->ref_count);
+}
+
+/* Channel task fired s_default_ping_timeout_ms after a PING was sent. If no ACK
+ * arrived (thread_data.ping_received still false), the connection is closed as
+ * unhealthy; otherwise the next ping round is scheduled. Always releases the
+ * refcount held for this set of tasks. */
+static void s_connection_ping_timeout_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+    (void)task;
+    (void)status;
+    struct aws_h2_sm_connection *sm_connection = arg;
+    if (status != AWS_TASK_STATUS_RUN_READY) {
+        goto done;
+    }
+    if (!sm_connection->connection) {
+        /* The connection has been released before timeout happens, just release the refcount */
+        goto done;
+    }
+    AWS_ASSERT(aws_channel_thread_is_callers_thread(aws_http_connection_get_channel(sm_connection->connection)));
+    if (!sm_connection->thread_data.ping_received) {
+        /* Timeout happened */
+        STREAM_MANAGER_LOGF(
+            ERROR,
+            sm_connection->stream_manager,
+            "ping timeout detected for connection: %p, closing connection.",
+            (void *)sm_connection->connection);
+
+        aws_http_connection_close(sm_connection->connection);
+    } else {
+        struct aws_channel *channel = aws_http_connection_get_channel(sm_connection->connection);
+        /* acquire a refcount for next set of tasks to run */
+        aws_ref_count_acquire(&sm_connection->ref_count);
+        aws_channel_schedule_task_future(
+            channel, &sm_connection->ping_task, sm_connection->thread_data.next_ping_task_time);
+    }
+done:
+    /* Release refcount for current set of tasks */
+    aws_ref_count_release(&sm_connection->ref_count);
+}
+
+/**
+ * Recurring channel task that sends an HTTP/2 PING to keep the connection
+ * healthy. On each run it:
+ *  - sends a PING (acquiring a refcount for the ACK callback),
+ *  - clears ping_received and records when the NEXT ping should run,
+ *  - schedules the timeout task, transferring this task's refcount to it.
+ */
+static void s_connection_ping_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+    (void)task;
+    (void)status;
+    struct aws_h2_sm_connection *sm_connection = arg;
+    if (status != AWS_TASK_STATUS_RUN_READY) {
+        /* Channel is shutting down; drop the refcount held for this task. */
+        aws_ref_count_release(&sm_connection->ref_count);
+        return;
+    }
+    if (!sm_connection->connection) {
+        /* The connection has been released before ping task, just release the refcount */
+        aws_ref_count_release(&sm_connection->ref_count);
+        return;
+    }
+    AWS_ASSERT(aws_channel_thread_is_callers_thread(aws_http_connection_get_channel(sm_connection->connection)));
+
+    STREAM_MANAGER_LOGF(
+        TRACE, sm_connection->stream_manager, "Sending PING for connection: %p.", (void *)sm_connection->connection);
+    aws_http2_connection_ping(sm_connection->connection, NULL, s_on_ping_complete, sm_connection);
+    /* Acquire refcount for PING complete to be invoked. */
+    aws_ref_count_acquire(&sm_connection->ref_count);
+    sm_connection->thread_data.ping_received = false;
+
+    /* schedule timeout task */
+    struct aws_channel *channel = aws_http_connection_get_channel(sm_connection->connection);
+    uint64_t current_time = 0;
+    /* NOTE(review): return value of aws_channel_current_clock_time is ignored;
+     * presumably it cannot fail for a live channel — confirm. */
+    aws_channel_current_clock_time(channel, &current_time);
+    sm_connection->thread_data.next_ping_task_time =
+        current_time + sm_connection->stream_manager->connection_ping_period_ns;
+    uint64_t timeout_time = current_time + sm_connection->stream_manager->connection_ping_timeout_ns;
+    aws_channel_task_init(
+        &sm_connection->ping_timeout_task,
+        s_connection_ping_timeout_task,
+        sm_connection,
+        "Stream manager connection ping timeout task");
+    /* keep the refcount for timeout task to run */
+    aws_channel_schedule_task_future(channel, &sm_connection->ping_timeout_task, timeout_time);
+}
+
+/* Zero-refcount destructor for an aws_h2_sm_connection: frees the struct. */
+static void s_sm_connection_destroy(void *user_data) {
+    struct aws_h2_sm_connection *connection_entry = user_data;
+    aws_mem_release(connection_entry->allocator, connection_entry);
+}
+
+/**
+ * Wraps a freshly acquired HTTP/2 connection in an aws_h2_sm_connection.
+ * The per-connection stream budget is the smaller of the manager's configured
+ * max and the peer's advertised MAX_CONCURRENT_STREAMS. If keep-alive pings
+ * are configured, the first ping task is scheduled here (holding a refcount).
+ */
+static struct aws_h2_sm_connection *s_sm_connection_new(
+    struct aws_http2_stream_manager *stream_manager,
+    struct aws_http_connection *connection) {
+    struct aws_h2_sm_connection *sm_connection =
+        aws_mem_calloc(stream_manager->allocator, 1, sizeof(struct aws_h2_sm_connection));
+    sm_connection->allocator = stream_manager->allocator;
+    /* Cap this connection's stream budget by the peer's MAX_CONCURRENT_STREAMS setting. */
+    struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT];
+    /* Setting id == array index + 1, so index with (id - 1). */
+    aws_http2_connection_get_remote_settings(connection, out_settings);
+    uint32_t remote_max_con_streams = out_settings[AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS - 1].value;
+    sm_connection->max_concurrent_streams =
+        aws_min_u32((uint32_t)stream_manager->max_concurrent_streams_per_connection, remote_max_con_streams);
+    sm_connection->connection = connection;
+    sm_connection->stream_manager = stream_manager;
+    sm_connection->state = AWS_H2SMCST_IDEAL;
+    aws_ref_count_init(&sm_connection->ref_count, sm_connection, s_sm_connection_destroy);
+    if (stream_manager->connection_ping_period_ns) {
+        struct aws_channel *channel = aws_http_connection_get_channel(connection);
+        uint64_t schedule_time = 0;
+        aws_channel_current_clock_time(channel, &schedule_time);
+        schedule_time += stream_manager->connection_ping_period_ns;
+        aws_channel_task_init(
+            &sm_connection->ping_task, s_connection_ping_task, sm_connection, "Stream manager connection ping task");
+        /* Keep a refcount on sm_connection for the task to run. */
+        aws_ref_count_acquire(&sm_connection->ref_count);
+        aws_channel_schedule_task_future(channel, &sm_connection->ping_task, schedule_time);
+    }
+    return sm_connection;
+}
+
+/**
+ * Hands the underlying HTTP connection back to the connection manager and
+ * drops this sm_connection's base refcount. Must only run when no streams
+ * are assigned, and only on the connection's channel thread.
+ */
+static void s_sm_connection_release_connection(struct aws_h2_sm_connection *sm_connection) {
+    AWS_ASSERT(sm_connection->num_streams_assigned == 0);
+    if (sm_connection->connection) {
+        /* Should only be invoked from the connection thread. */
+        AWS_ASSERT(aws_channel_thread_is_callers_thread(aws_http_connection_get_channel(sm_connection->connection)));
+        int error = aws_http_connection_manager_release_connection(
+            sm_connection->stream_manager->connection_manager, sm_connection->connection);
+        AWS_ASSERT(!error);
+        (void)error;
+        sm_connection->connection = NULL;
+    }
+    aws_ref_count_release(&sm_connection->ref_count);
+}
+
+/**
+ * Called with the lock held after a connection acquisition fails.
+ * Moves the pending stream acquisitions that can no longer be served by the
+ * still-acquiring connections onto stream_acquisitions_to_fail, so the caller
+ * can fail them outside the lock.
+ */
+static void s_sm_on_connection_acquired_failed_synced(
+    struct aws_http2_stream_manager *stream_manager,
+    struct aws_linked_list *stream_acquisitions_to_fail) {
+
+    /* Once we failed to acquire a connection, we fail the stream acquisitions that cannot fit into the remaining
+     * acquiring connections. */
+    size_t num_can_fit = aws_mul_size_saturating(
+        stream_manager->ideal_concurrent_streams_per_connection,
+        stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING]);
+    size_t num_to_fail = aws_sub_size_saturating(
+        stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION], num_can_fit);
+    /* Collect a list to fail instead of failing them within the lock. */
+    for (size_t i = 0; i < num_to_fail; i++) {
+        struct aws_linked_list_node *node =
+            aws_linked_list_pop_front(&stream_manager->synced_data.pending_stream_acquisitions);
+        aws_linked_list_push_back(stream_acquisitions_to_fail, node);
+    }
+    s_sm_count_decrease_synced(stream_manager, AWS_SMCT_PENDING_ACQUISITION, num_to_fail);
+}
+
+/**
+ * Connection-manager callback for a finished connection acquisition.
+ * Under the lock it decides whether to keep the connection (add it to the
+ * ideal set) or release it immediately (wrong HTTP version, shutting down, or
+ * no pending acquisitions), and collects acquisitions to fail. All callbacks,
+ * releases and the built transaction are executed after the lock is dropped.
+ */
+static void s_sm_on_connection_acquired(struct aws_http_connection *connection, int error_code, void *user_data) {
+    struct aws_http2_stream_manager *stream_manager = user_data;
+    struct aws_http2_stream_management_transaction work;
+    STREAM_MANAGER_LOGF(TRACE, stream_manager, "connection=%p acquired from connection manager", (void *)connection);
+    int re_error = 0;
+    int stream_fail_error_code = AWS_ERROR_SUCCESS;
+    bool should_release_connection = false;
+    struct aws_linked_list stream_acquisitions_to_fail;
+    aws_linked_list_init(&stream_acquisitions_to_fail);
+    s_aws_stream_management_transaction_init(&work, stream_manager);
+    { /* BEGIN CRITICAL SECTION */
+        s_lock_synced_data(stream_manager);
+        s_sm_count_decrease_synced(stream_manager, AWS_SMCT_CONNECTIONS_ACQUIRING, 1);
+        if (error_code || !connection) {
+            STREAM_MANAGER_LOGF(
+                ERROR,
+                stream_manager,
+                "connection acquired from connection manager failed, with error: %d(%s)",
+                error_code,
+                aws_error_str(error_code));
+            s_sm_on_connection_acquired_failed_synced(stream_manager, &stream_acquisitions_to_fail);
+            stream_fail_error_code = AWS_ERROR_HTTP_STREAM_MANAGER_CONNECTION_ACQUIRE_FAILURE;
+        } else if (aws_http_connection_get_version(connection) != AWS_HTTP_VERSION_2) {
+            STREAM_MANAGER_LOGF(
+                ERROR,
+                stream_manager,
+                "Unexpected HTTP version acquired, release the connection=%p acquired immediately",
+                (void *)connection);
+            should_release_connection = true;
+            s_sm_on_connection_acquired_failed_synced(stream_manager, &stream_acquisitions_to_fail);
+            stream_fail_error_code = AWS_ERROR_HTTP_STREAM_MANAGER_UNEXPECTED_HTTP_VERSION;
+        } else if (stream_manager->synced_data.state != AWS_H2SMST_READY) {
+            STREAM_MANAGER_LOGF(
+                DEBUG,
+                stream_manager,
+                "shutting down, release the connection=%p acquired immediately",
+                (void *)connection);
+            /* Release the acquired connection */
+            should_release_connection = true;
+        } else if (stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] == 0) {
+            STREAM_MANAGER_LOGF(
+                DEBUG,
+                stream_manager,
+                "No pending acquisition, release the connection=%p acquired immediately",
+                (void *)connection);
+            /* Release the acquired connection */
+            should_release_connection = true;
+        } else {
+            /* Keep the connection: wrap it and put it in the ideal set. */
+            struct aws_h2_sm_connection *sm_connection = s_sm_connection_new(stream_manager, connection);
+            bool added = false;
+            re_error |=
+                aws_random_access_set_add(&stream_manager->synced_data.ideal_available_set, sm_connection, &added);
+            re_error |= !added;
+            ++stream_manager->synced_data.holding_connections_count;
+        }
+        s_aws_http2_stream_manager_build_transaction_synced(&work);
+        s_unlock_synced_data(stream_manager);
+    } /* END CRITICAL SECTION */
+
+    if (should_release_connection) {
+        STREAM_MANAGER_LOGF(DEBUG, stream_manager, "Releasing connection: %p", (void *)connection);
+        re_error |= aws_http_connection_manager_release_connection(stream_manager->connection_manager, connection);
+    }
+
+    AWS_ASSERT(!re_error && "connection acquired callback fails with programming errors");
+    (void)re_error;
+
+    /* Fail acquisitions if any */
+    s_finish_pending_stream_acquisitions_list_helper(
+        stream_manager, &stream_acquisitions_to_fail, stream_fail_error_code);
+    s_aws_http2_stream_manager_execute_transaction(&work);
+}
+
+/**
+ * Incoming-headers callback installed on every stream the manager creates.
+ * First applies the close_connection_on_server_error policy: on 500/502/503/
+ * 504 responses the connection is marked to accept no new requests, so retries
+ * land on a different connection. Then delegates to the user's
+ * on_response_headers callback, if any.
+ *
+ * Fix: the original returned from the user callback BEFORE the server-error
+ * check, so setting on_response_headers silently disabled the
+ * close_connection_on_server_error feature. The check now always runs.
+ */
+static int s_on_incoming_headers(
+    struct aws_http_stream *stream,
+    enum aws_http_header_block header_block,
+    const struct aws_http_header *header_array,
+    size_t num_headers,
+    void *user_data) {
+    struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = user_data;
+    struct aws_h2_sm_connection *sm_connection = pending_stream_acquisition->sm_connection;
+    struct aws_http2_stream_manager *stream_manager = sm_connection->stream_manager;
+
+    if (stream_manager->close_connection_on_server_error) {
+        /* Check status code if stream completed successfully. */
+        int status_code = 0;
+        aws_http_stream_get_incoming_response_status(stream, &status_code);
+        AWS_ASSERT(status_code != 0); /* The get status should not fail */
+        switch (status_code) {
+            case AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR:
+            case AWS_HTTP_STATUS_CODE_502_BAD_GATEWAY:
+            case AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE:
+            case AWS_HTTP_STATUS_CODE_504_GATEWAY_TIMEOUT:
+                /* For those error code if the retry happens, it should not use the same connection. */
+                if (!sm_connection->thread_data.stopped_new_requests) {
+                    STREAM_MANAGER_LOGF(
+                        DEBUG,
+                        stream_manager,
+                        "no longer using connection: %p due to receiving %d server error status code for stream: %p",
+                        (void *)sm_connection->connection,
+                        status_code,
+                        (void *)stream);
+                    aws_http_connection_stop_new_requests(sm_connection->connection);
+                    sm_connection->thread_data.stopped_new_requests = true;
+                }
+                break;
+            default:
+                break;
+        }
+    }
+    if (pending_stream_acquisition->options.on_response_headers) {
+        return pending_stream_acquisition->options.on_response_headers(
+            stream, header_block, header_array, num_headers, pending_stream_acquisition->options.user_data);
+    }
+    return AWS_OP_SUCCESS;
+}
+
+/* Forwards header-block-done events to the user's callback, when one is set. */
+static int s_on_incoming_header_block_done(
+    struct aws_http_stream *stream,
+    enum aws_http_header_block header_block,
+    void *user_data) {
+    struct aws_h2_sm_pending_stream_acquisition *acquisition = user_data;
+    if (!acquisition->options.on_response_header_block_done) {
+        return AWS_OP_SUCCESS;
+    }
+    return acquisition->options.on_response_header_block_done(
+        stream, header_block, acquisition->options.user_data);
+}
+
+/* Forwards response-body chunks to the user's callback, when one is set. */
+static int s_on_incoming_body(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) {
+    struct aws_h2_sm_pending_stream_acquisition *acquisition = user_data;
+    if (!acquisition->options.on_response_body) {
+        return AWS_OP_SUCCESS;
+    }
+    return acquisition->options.on_response_body(stream, data, acquisition->options.user_data);
+}
+
+/* Helper invoked (with the lock held) when the underlying connection is still
+ * available and num_streams_assigned has just been decremented: moves the
+ * connection between the full / nearly-full (soft-limited) / ideal sets based
+ * on its current stream count versus the ideal and max limits. */
+static void s_update_sm_connection_set_on_stream_finishes_synced(
+    struct aws_h2_sm_connection *sm_connection,
+    struct aws_http2_stream_manager *stream_manager) {
+
+    int re_error = 0;
+    size_t cur_num = sm_connection->num_streams_assigned;
+    size_t ideal_num = stream_manager->ideal_concurrent_streams_per_connection;
+    size_t max_num = sm_connection->max_concurrent_streams;
+    /**
+     * TODO: When the MAX_CONCURRENT_STREAMS from other side changed after the initial settings. We need to:
+     * - figure out where I am
+     * - figure out where I should be
+     * - if they're different, remove from where I am, put where should be
+     */
+    if (sm_connection->state == AWS_H2SMCST_NEARLY_FULL && cur_num < ideal_num) {
+        /* this connection is back from soft limited to ideal */
+        bool exist = false;
+        (void)exist;
+        AWS_ASSERT(
+            aws_random_access_set_exist(&stream_manager->synced_data.nonideal_available_set, sm_connection, &exist) ==
+                AWS_OP_SUCCESS &&
+            exist);
+        re_error |= aws_random_access_set_remove(&stream_manager->synced_data.nonideal_available_set, sm_connection);
+        bool added = false;
+        re_error |= aws_random_access_set_add(&stream_manager->synced_data.ideal_available_set, sm_connection, &added);
+        re_error |= !added;
+        sm_connection->state = AWS_H2SMCST_IDEAL;
+    } else if (sm_connection->state == AWS_H2SMCST_FULL && cur_num < max_num) {
+        /* this connection is back from full */
+        STREAM_MANAGER_LOGF(
+            DEBUG,
+            stream_manager,
+            "connection:%p back to available, assigned stream=%zu, max concurrent streams=%" PRIu32 "",
+            (void *)sm_connection->connection,
+            cur_num,
+            sm_connection->max_concurrent_streams);
+        bool added = false;
+        if (cur_num >= ideal_num) {
+            /* Still at/above the ideal load: only soft-limited availability. */
+            sm_connection->state = AWS_H2SMCST_NEARLY_FULL;
+            STREAM_MANAGER_LOGF(
+                TRACE, stream_manager, "connection:%p added to soft limited set", (void *)sm_connection->connection);
+            re_error |=
+                aws_random_access_set_add(&stream_manager->synced_data.nonideal_available_set, sm_connection, &added);
+        } else {
+            sm_connection->state = AWS_H2SMCST_IDEAL;
+            STREAM_MANAGER_LOGF(
+                TRACE, stream_manager, "connection:%p added to ideal set", (void *)sm_connection->connection);
+            re_error |=
+                aws_random_access_set_add(&stream_manager->synced_data.ideal_available_set, sm_connection, &added);
+        }
+        re_error |= !added;
+    }
+    AWS_ASSERT(re_error == AWS_OP_SUCCESS);
+    (void)re_error;
+}
+
+/**
+ * Bookkeeping run whenever a stream scheduled on sm_connection finishes
+ * (completed or failed to be made). Decrements the open-stream counters,
+ * re-buckets or retires the connection, and — if the connection now has zero
+ * assigned streams — releases it back to the connection manager, possibly
+ * requesting replacement connections for still-pending acquisitions.
+ */
+static void s_sm_connection_on_scheduled_stream_finishes(
+    struct aws_h2_sm_connection *sm_connection,
+    struct aws_http2_stream_manager *stream_manager) {
+    /* Reaching max concurrency still allows new requests, but such new streams would complete with an error. */
+    bool connection_available = aws_http_connection_new_requests_allowed(sm_connection->connection);
+    struct aws_http2_stream_management_transaction work;
+    s_aws_stream_management_transaction_init(&work, stream_manager);
+    { /* BEGIN CRITICAL SECTION */
+        s_lock_synced_data(stream_manager);
+        s_sm_count_decrease_synced(stream_manager, AWS_SMCT_OPEN_STREAM, 1);
+        --sm_connection->num_streams_assigned;
+        if (!connection_available) {
+            /* It might be removed already, but, it's fine */
+            aws_random_access_set_remove(&stream_manager->synced_data.ideal_available_set, sm_connection);
+            aws_random_access_set_remove(&stream_manager->synced_data.nonideal_available_set, sm_connection);
+        } else {
+            s_update_sm_connection_set_on_stream_finishes_synced(sm_connection, stream_manager);
+        }
+        s_aws_http2_stream_manager_build_transaction_synced(&work);
+        /* After we build transaction, if the sm_connection still have zero assigned stream, we can kill the
+         * sm_connection */
+        if (sm_connection->num_streams_assigned == 0) {
+            /* It might be removed already, but, it's fine */
+            aws_random_access_set_remove(&stream_manager->synced_data.ideal_available_set, sm_connection);
+            work.sm_connection_to_release = sm_connection;
+            --stream_manager->synced_data.holding_connections_count;
+            /* After we release one connection back, we should check if we need more connections */
+            if (stream_manager->synced_data.state == AWS_H2SMST_READY &&
+                stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION]) {
+                s_check_new_connections_needed_synced(&work);
+            }
+        }
+        s_unlock_synced_data(stream_manager);
+    } /* END CRITICAL SECTION */
+    s_aws_http2_stream_manager_execute_transaction(&work);
+}
+
+/* Stream-complete callback: forward the completion to the user's callback
+ * (if set), then update the manager's per-connection bookkeeping. */
+static void s_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) {
+    struct aws_h2_sm_pending_stream_acquisition *acquisition = user_data;
+    struct aws_h2_sm_connection *sm_connection = acquisition->sm_connection;
+    if (acquisition->options.on_complete != NULL) {
+        acquisition->options.on_complete(stream, error_code, acquisition->options.user_data);
+    }
+    s_sm_connection_on_scheduled_stream_finishes(sm_connection, sm_connection->stream_manager);
+}
+
+/* Stream-destroy callback: notify the user (if requested), then free the
+ * pending acquisition that carried this stream's bookkeeping. */
+static void s_on_stream_destroy(void *user_data) {
+    struct aws_h2_sm_pending_stream_acquisition *acquisition = user_data;
+    if (acquisition->options.on_destroy != NULL) {
+        acquisition->options.on_destroy(acquisition->options.user_data);
+    }
+    s_pending_stream_acquisition_destroy(acquisition);
+}
+
+/* Scheduled to happen from connection's thread: turns a pending acquisition
+ * into a real HTTP/2 stream on its assigned connection, activates it, and
+ * invokes the user's acquisition callback (with the stream on success, with an
+ * error code on any failure). On failure the open-stream bookkeeping is undone
+ * via s_sm_connection_on_scheduled_stream_finishes. */
+static void s_make_request_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+    (void)task;
+    struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = arg;
+    struct aws_h2_sm_connection *sm_connection = pending_stream_acquisition->sm_connection;
+    struct aws_http2_stream_manager *stream_manager = sm_connection->stream_manager;
+    int error_code = AWS_ERROR_SUCCESS;
+
+    STREAM_MANAGER_LOGF(
+        TRACE,
+        stream_manager,
+        "Make request task running for acquisition:%p from connection:%p thread",
+        (void *)pending_stream_acquisition,
+        (void *)sm_connection->connection);
+    bool is_shutting_down = false;
+    { /* BEGIN CRITICAL SECTION */
+        s_lock_synced_data(stream_manager);
+        is_shutting_down = stream_manager->synced_data.state != AWS_H2SMST_READY;
+        s_sm_count_decrease_synced(stream_manager, AWS_SMCT_PENDING_MAKE_REQUESTS, 1);
+        /* The stream has not opened yet, but we increase the count here; if anything fails, the count is decreased
+         */
+        s_sm_count_increase_synced(stream_manager, AWS_SMCT_OPEN_STREAM, 1);
+        AWS_ASSERT(
+            sm_connection->max_concurrent_streams >= sm_connection->num_streams_assigned &&
+            "The max concurrent streams exceed");
+        s_unlock_synced_data(stream_manager);
+    } /* END CRITICAL SECTION */
+    /* this is a channel task. If it is canceled, that means the channel shutdown. In that case, that's equivalent
+     * to a closed connection. */
+    if (status != AWS_TASK_STATUS_RUN_READY) {
+        STREAM_MANAGER_LOGF(
+            ERROR,
+            stream_manager,
+            "acquisition:%p failed as the task is cancelled.",
+            (void *)pending_stream_acquisition);
+        error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+        goto error;
+    }
+    if (is_shutting_down) {
+        STREAM_MANAGER_LOGF(
+            ERROR,
+            stream_manager,
+            "acquisition:%p failed as stream manager is shutting down before task runs.",
+            (void *)pending_stream_acquisition);
+        error_code = AWS_ERROR_HTTP_STREAM_MANAGER_SHUTTING_DOWN;
+        goto error;
+    }
+    struct aws_http_make_request_options request_options = {
+        .self_size = sizeof(request_options),
+        .request = pending_stream_acquisition->request,
+        .on_response_headers = s_on_incoming_headers,
+        .on_response_header_block_done = s_on_incoming_header_block_done,
+        .on_response_body = s_on_incoming_body,
+        .on_complete = s_on_stream_complete,
+        .on_destroy = s_on_stream_destroy,
+        .user_data = pending_stream_acquisition,
+        .http2_use_manual_data_writes = pending_stream_acquisition->options.http2_use_manual_data_writes,
+    };
+    /* TODO: we could put the pending acquisition back to the list if the connection is not available for new request.
+     */
+
+    struct aws_http_stream *stream = aws_http_connection_make_request(sm_connection->connection, &request_options);
+    if (!stream) {
+        error_code = aws_last_error();
+        STREAM_MANAGER_LOGF(
+            ERROR,
+            stream_manager,
+            "acquisition:%p failed as HTTP level make request failed with error: %d(%s).",
+            (void *)pending_stream_acquisition,
+            error_code,
+            aws_error_str(error_code));
+        goto error;
+    }
+    /* Since we're in the connection's thread, this should be safe, there won't be any other callbacks to the user */
+    if (aws_http_stream_activate(stream)) {
+        /* Activate failed, the on_completed callback will NOT be invoked from HTTP, but we already told user about
+         * the stream. Invoke the user completed callback here */
+        error_code = aws_last_error();
+        STREAM_MANAGER_LOGF(
+            ERROR,
+            stream_manager,
+            "acquisition:%p failed as stream activate failed with error: %d(%s).",
+            (void *)pending_stream_acquisition,
+            error_code,
+            aws_error_str(error_code));
+        goto error;
+    }
+    if (pending_stream_acquisition->callback) {
+        pending_stream_acquisition->callback(stream, 0, pending_stream_acquisition->user_data);
+    }
+
+    /* Happy case, the complete callback will be invoked, and we clean things up at the callback, but we can release the
+     * request now */
+    aws_http_message_release(pending_stream_acquisition->request);
+    pending_stream_acquisition->request = NULL;
+    return;
+error:
+    if (pending_stream_acquisition->callback) {
+        pending_stream_acquisition->callback(NULL, error_code, pending_stream_acquisition->user_data);
+    }
+    s_pending_stream_acquisition_destroy(pending_stream_acquisition);
+    /* Must run after the acquisition is destroyed: this call can release the last internal ref and destroy the
+     * whole stream manager. */
+    s_sm_connection_on_scheduled_stream_finishes(sm_connection, stream_manager);
+}
+
+/* NEVER invoke with lock held.
+ * Executes the side effects collected in a transaction while the lock was
+ * held: releases an idle connection, schedules make-request tasks onto their
+ * connections' event loops, acquires new connections, and cleans up the
+ * transaction (which may drop the last internal ref and destroy the manager). */
+static void s_aws_http2_stream_manager_execute_transaction(struct aws_http2_stream_management_transaction *work) {
+
+    struct aws_http2_stream_manager *stream_manager = work->stream_manager;
+
+    /* Step1: Release connection */
+    if (work->sm_connection_to_release) {
+        AWS_ASSERT(work->sm_connection_to_release->num_streams_assigned == 0);
+        STREAM_MANAGER_LOGF(
+            DEBUG,
+            stream_manager,
+            "Release connection:%p back to connection manager as no outstanding streams",
+            (void *)work->sm_connection_to_release->connection);
+        s_sm_connection_release_connection(work->sm_connection_to_release);
+    }
+
+    /* Step2: Make request. The work should know what connection for the request to be made. */
+    while (!aws_linked_list_empty(&work->pending_make_requests)) {
+        /* The completions can also fail as the connection can be unavailable after the decision made. We just fail
+         * the acquisition */
+        struct aws_linked_list_node *node = aws_linked_list_pop_front(&work->pending_make_requests);
+        struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition =
+            AWS_CONTAINER_OF(node, struct aws_h2_sm_pending_stream_acquisition, node);
+
+        AWS_ASSERT(
+            pending_stream_acquisition->sm_connection &&
+            "Stream manager internal bug: connection is not decided before execute transaction");
+
+        STREAM_MANAGER_LOGF(
+            TRACE,
+            stream_manager,
+            "acquisition:%p is scheduled to be made request from connection:%p thread",
+            (void *)pending_stream_acquisition,
+            (void *)pending_stream_acquisition->sm_connection->connection);
+        /**
+         * Schedule a task on the connection's event loop to make the request, so that:
+         * - we can activate the stream for the user and then invoke the callback
+         * - the callback happens asynchronously even if the stream fails to be created
+         * - we can make sure we will not break the settings
+         */
+        struct aws_channel *channel =
+            aws_http_connection_get_channel(pending_stream_acquisition->sm_connection->connection);
+        aws_channel_task_init(
+            &pending_stream_acquisition->make_request_task,
+            s_make_request_task,
+            pending_stream_acquisition,
+            "Stream manager make request task");
+        aws_channel_schedule_task_now(channel, &pending_stream_acquisition->make_request_task);
+    }
+
+    /* Step 3: Acquire connections if needed */
+    if (work->new_connections) {
+        STREAM_MANAGER_LOGF(DEBUG, stream_manager, "acquiring %zu new connections", work->new_connections);
+    }
+    for (size_t i = 0; i < work->new_connections; ++i) {
+        aws_http_connection_manager_acquire_connection(
+            stream_manager->connection_manager, s_sm_on_connection_acquired, stream_manager);
+    }
+
+    /*
+     * Step 4: Clean up work. Do this here rather than at the end of every caller. Destroy the manager if necessary
+     */
+    s_aws_stream_management_transaction_clean_up(work);
+}
+
+/* Final teardown of the stream manager, run after the underlying connection
+ * manager has fully shut down (or when construction fails). Frees all owned
+ * state, fires the user's shutdown-complete callback, then frees the struct.
+ * NOTE(review): s_-prefixed but declared with external linkage; presumably
+ * intended to be `static` — confirm no other translation unit references it. */
+void s_stream_manager_destroy_final(struct aws_http2_stream_manager *stream_manager) {
+    if (!stream_manager) {
+        return;
+    }
+
+    STREAM_MANAGER_LOG(TRACE, stream_manager, "Stream Manager finishes destroying self");
+    /* Connection manager has already been cleaned up */
+    AWS_FATAL_ASSERT(stream_manager->connection_manager == NULL);
+    AWS_FATAL_ASSERT(aws_linked_list_empty(&stream_manager->synced_data.pending_stream_acquisitions));
+    aws_mutex_clean_up(&stream_manager->synced_data.lock);
+    aws_random_access_set_clean_up(&stream_manager->synced_data.ideal_available_set);
+    aws_random_access_set_clean_up(&stream_manager->synced_data.nonideal_available_set);
+    aws_client_bootstrap_release(stream_manager->bootstrap);
+
+    if (stream_manager->shutdown_complete_callback) {
+        stream_manager->shutdown_complete_callback(stream_manager->shutdown_complete_user_data);
+    }
+    aws_mem_release(stream_manager->allocator, stream_manager);
+}
+
+/* Shutdown-complete callback handed to the underlying connection manager;
+ * once it fires, nothing else references the manager and final destruction
+ * can proceed.
+ * NOTE(review): s_-prefixed but declared with external linkage; presumably
+ * intended to be `static` — confirm no other translation unit references it. */
+void s_stream_manager_on_cm_shutdown_complete(void *user_data) {
+    struct aws_http2_stream_manager *stream_manager = (struct aws_http2_stream_manager *)user_data;
+    STREAM_MANAGER_LOGF(
+        TRACE,
+        stream_manager,
+        "Underlying connection manager (ip=%p) finished shutdown, stream manager can finish destroying now",
+        (void *)stream_manager->connection_manager);
+    stream_manager->connection_manager = NULL;
+    s_stream_manager_destroy_final(stream_manager);
+}
+
+/* Zero-internal-refcount callback: all outstanding work is done, so start the
+ * async teardown by releasing the connection manager. Final destruction
+ * happens in s_stream_manager_on_cm_shutdown_complete. */
+static void s_stream_manager_start_destroy(struct aws_http2_stream_manager *stream_manager) {
+    STREAM_MANAGER_LOG(TRACE, stream_manager, "Stream Manager reaches the condition to destroy, start to destroy");
+    /* If there is no outstanding streams, the connections set should be empty. */
+    AWS_ASSERT(aws_random_access_set_get_size(&stream_manager->synced_data.ideal_available_set) == 0);
+    AWS_ASSERT(aws_random_access_set_get_size(&stream_manager->synced_data.nonideal_available_set) == 0);
+    AWS_ASSERT(stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING] == 0);
+    AWS_ASSERT(stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_OPEN_STREAM] == 0);
+    AWS_ASSERT(stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_MAKE_REQUESTS] == 0);
+    AWS_ASSERT(stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] == 0);
+    AWS_ASSERT(stream_manager->connection_manager);
+    struct aws_http_connection_manager *cm = stream_manager->connection_manager;
+    stream_manager->connection_manager = NULL;
+    aws_http_connection_manager_release(cm);
+}
+
+/* Zero-external-refcount callback: flips the manager into DESTROYING state so
+ * no new stream requests are accepted, then drops the baseline internal ref;
+ * teardown proceeds once all outstanding internal work finishes.
+ * NOTE(review): s_-prefixed but declared with external linkage; presumably
+ * intended to be `static` — confirm no other translation unit references it. */
+void s_stream_manager_on_zero_external_ref(struct aws_http2_stream_manager *stream_manager) {
+    STREAM_MANAGER_LOG(
+        TRACE,
+        stream_manager,
+        "Last refcount released, manager stop accepting new stream request and will start to clean up when not "
+        "outstanding tasks remaining.");
+    struct aws_http2_stream_management_transaction work;
+    s_aws_stream_management_transaction_init(&work, stream_manager);
+    { /* BEGIN CRITICAL SECTION */
+        s_lock_synced_data(stream_manager);
+        stream_manager->synced_data.state = AWS_H2SMST_DESTROYING;
+        s_aws_http2_stream_manager_build_transaction_synced(&work);
+        /* Release the internal ref count as no external usage anymore */
+        aws_ref_count_release(&stream_manager->internal_ref_count);
+        s_unlock_synced_data(stream_manager);
+    } /* END CRITICAL SECTION */
+    s_aws_http2_stream_manager_execute_transaction(&work);
+}
+
+/**
+ * Creates a new HTTP/2 stream manager.
+ * Validates that cleartext connections use prior knowledge, initializes the
+ * synced sets/lock and both refcounts, derives the keep-alive ping period and
+ * timeout (timeout is clamped to the period), then creates the underlying
+ * connection manager — the last step that can fail.
+ * Returns NULL (raising an error) on failure.
+ *
+ * Fix: log-message typo "Clapping" corrected to "Clamping".
+ */
+struct aws_http2_stream_manager *aws_http2_stream_manager_new(
+    struct aws_allocator *allocator,
+    const struct aws_http2_stream_manager_options *options) {
+
+    AWS_PRECONDITION(allocator);
+    /* The other options are validated by the aws_http_connection_manager_new */
+    if (!options->http2_prior_knowledge && !options->tls_connection_options) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION_MANAGER,
+            "Invalid options - Prior knowledge must be used for cleartext HTTP/2 connections."
+            " Upgrade from HTTP/1.1 is not supported.");
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+    struct aws_http2_stream_manager *stream_manager =
+        aws_mem_calloc(allocator, 1, sizeof(struct aws_http2_stream_manager));
+    stream_manager->allocator = allocator;
+    aws_linked_list_init(&stream_manager->synced_data.pending_stream_acquisitions);
+
+    if (aws_mutex_init(&stream_manager->synced_data.lock)) {
+        goto on_error;
+    }
+    if (aws_random_access_set_init(
+            &stream_manager->synced_data.ideal_available_set,
+            allocator,
+            aws_hash_ptr,
+            aws_ptr_eq,
+            NULL /* destroy function */,
+            2)) {
+        goto on_error;
+    }
+    if (aws_random_access_set_init(
+            &stream_manager->synced_data.nonideal_available_set,
+            allocator,
+            aws_hash_ptr,
+            aws_ptr_eq,
+            NULL /* destroy function */,
+            2)) {
+        goto on_error;
+    }
+    /* External refs gate new work; the internal ref tracks outstanding work. */
+    aws_ref_count_init(
+        &stream_manager->external_ref_count,
+        stream_manager,
+        (aws_simple_completion_callback *)s_stream_manager_on_zero_external_ref);
+    aws_ref_count_init(
+        &stream_manager->internal_ref_count,
+        stream_manager,
+        (aws_simple_completion_callback *)s_stream_manager_start_destroy);
+
+    if (options->connection_ping_period_ms) {
+        stream_manager->connection_ping_period_ns =
+            aws_timestamp_convert(options->connection_ping_period_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
+        size_t connection_ping_timeout_ms =
+            options->connection_ping_timeout_ms ? options->connection_ping_timeout_ms : s_default_ping_timeout_ms;
+        stream_manager->connection_ping_timeout_ns =
+            aws_timestamp_convert(connection_ping_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
+        /* A timeout longer than the period would overlap pings; clamp it. */
+        if (stream_manager->connection_ping_period_ns < stream_manager->connection_ping_timeout_ns) {
+            STREAM_MANAGER_LOGF(
+                WARN,
+                stream_manager,
+                "connection_ping_period_ms: %zu is shorter than connection_ping_timeout_ms: %zu. Clamping "
+                "connection_ping_timeout_ms to %zu",
+                options->connection_ping_period_ms,
+                connection_ping_timeout_ms,
+                options->connection_ping_period_ms);
+            stream_manager->connection_ping_timeout_ns = stream_manager->connection_ping_period_ns;
+        }
+    }
+
+    stream_manager->bootstrap = aws_client_bootstrap_acquire(options->bootstrap);
+    struct aws_http_connection_manager_options cm_options = {
+        .bootstrap = options->bootstrap,
+        .socket_options = options->socket_options,
+        .tls_connection_options = options->tls_connection_options,
+        .http2_prior_knowledge = options->http2_prior_knowledge,
+        .host = options->host,
+        .port = options->port,
+        .enable_read_back_pressure = options->enable_read_back_pressure,
+        .monitoring_options = options->monitoring_options,
+        .proxy_options = options->proxy_options,
+        .proxy_ev_settings = options->proxy_ev_settings,
+        .max_connections = options->max_connections,
+        .shutdown_complete_user_data = stream_manager,
+        .shutdown_complete_callback = s_stream_manager_on_cm_shutdown_complete,
+        .initial_settings_array = options->initial_settings_array,
+        .num_initial_settings = options->num_initial_settings,
+        .max_closed_streams = options->max_closed_streams,
+        .http2_conn_manual_window_management = options->conn_manual_window_management,
+    };
+    /* aws_http_connection_manager_new needs to be the last thing that can fail */
+    stream_manager->connection_manager = aws_http_connection_manager_new(allocator, &cm_options);
+    if (!stream_manager->connection_manager) {
+        goto on_error;
+    }
+    /* Nothing can fail after here */
+    stream_manager->synced_data.state = AWS_H2SMST_READY;
+    stream_manager->shutdown_complete_callback = options->shutdown_complete_callback;
+    stream_manager->shutdown_complete_user_data = options->shutdown_complete_user_data;
+    /* Zero means "no limit requested": treat as UINT32_MAX. */
+    stream_manager->ideal_concurrent_streams_per_connection = options->ideal_concurrent_streams_per_connection
+                                                                  ? options->ideal_concurrent_streams_per_connection
+                                                                  : UINT32_MAX;
+    stream_manager->max_concurrent_streams_per_connection =
+        options->max_concurrent_streams_per_connection ? options->max_concurrent_streams_per_connection : UINT32_MAX;
+    stream_manager->max_connections = options->max_connections;
+    stream_manager->close_connection_on_server_error = options->close_connection_on_server_error;
+
+    return stream_manager;
+on_error:
+    s_stream_manager_destroy_final(stream_manager);
+    return NULL;
+}
+
+struct aws_http2_stream_manager *aws_http2_stream_manager_acquire(struct aws_http2_stream_manager *stream_manager) {
+    /* NULL-tolerant: acquiring a NULL manager is a harmless no-op. Returns the input for chaining. */
+    if (stream_manager != NULL) {
+        aws_ref_count_acquire(&stream_manager->external_ref_count);
+    }
+    return stream_manager;
+}
+
+struct aws_http2_stream_manager *aws_http2_stream_manager_release(struct aws_http2_stream_manager *stream_manager) {
+    /*
+     * NULL-tolerant: releasing a NULL manager is a harmless no-op. Always returns NULL so callers
+     * can write `sm = aws_http2_stream_manager_release(sm);` to drop their pointer in one step.
+     */
+    if (stream_manager != NULL) {
+        aws_ref_count_release(&stream_manager->external_ref_count);
+    }
+    return NULL;
+}
+
+/*
+ * Queues an asynchronous stream acquisition. The pending acquisition is appended to the synced list
+ * under the lock; the transaction built while the lock is held is then executed outside the lock,
+ * where it may vend a stream on an existing connection, request a new connection, or complete the
+ * acquisition with an error via the user callback.
+ */
+void aws_http2_stream_manager_acquire_stream(
+    struct aws_http2_stream_manager *stream_manager,
+    const struct aws_http2_stream_manager_acquire_stream_options *acquire_stream_option) {
+    AWS_PRECONDITION(stream_manager);
+    AWS_PRECONDITION(acquire_stream_option);
+    AWS_PRECONDITION(acquire_stream_option->callback);
+    AWS_PRECONDITION(acquire_stream_option->options);
+    struct aws_http2_stream_management_transaction work;
+    struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = s_new_pending_stream_acquisition(
+        stream_manager->allocator,
+        acquire_stream_option->options,
+        acquire_stream_option->callback,
+        acquire_stream_option->user_data);
+    STREAM_MANAGER_LOGF(
+        TRACE, stream_manager, "Stream Manager creates acquisition:%p for user", (void *)pending_stream_acquisition);
+    s_aws_stream_management_transaction_init(&work, stream_manager);
+    { /* BEGIN CRITICAL SECTION */
+        s_lock_synced_data(stream_manager);
+        /* it's use after free crime */
+        AWS_FATAL_ASSERT(stream_manager->synced_data.state != AWS_H2SMST_DESTROYING);
+        aws_linked_list_push_back(
+            &stream_manager->synced_data.pending_stream_acquisitions, &pending_stream_acquisition->node);
+        s_sm_count_increase_synced(stream_manager, AWS_SMCT_PENDING_ACQUISITION, 1);
+        s_aws_http2_stream_manager_build_transaction_synced(&work);
+        s_unlock_synced_data(stream_manager);
+    } /* END CRITICAL SECTION */
+    /* Runs the work decided above (connection acquisition, stream creation, ...) without holding the lock. */
+    s_aws_http2_stream_manager_execute_transaction(&work);
+}
+
+/*
+ * Sums, over every connection in the set, how many additional streams each connection can still
+ * take (its advertised max minus the streams already assigned to it). Caller must hold the lock.
+ */
+static size_t s_get_available_streams_num_from_connection_set(const struct aws_random_access_set *set) {
+    size_t total_available = 0;
+    const size_t connection_count = aws_random_access_set_get_size(set);
+    for (size_t index = 0; index < connection_count; index++) {
+        struct aws_h2_sm_connection *sm_connection = NULL;
+        AWS_FATAL_ASSERT(
+            aws_random_access_set_random_get_ptr_index(set, (void **)&sm_connection, index) == AWS_OP_SUCCESS);
+        uint32_t remaining = sm_connection->max_concurrent_streams - sm_connection->num_streams_assigned;
+        total_available += (size_t)remaining;
+    }
+    return total_available;
+}
+
+/*
+ * Snapshots the manager's concurrency metrics into out_metrics. The synced-data lock is taken for
+ * the duration, so the three reported values are mutually consistent at the moment of the call.
+ */
+void aws_http2_stream_manager_fetch_metrics(
+    const struct aws_http2_stream_manager *stream_manager,
+    struct aws_http_manager_metrics *out_metrics) {
+    AWS_PRECONDITION(stream_manager);
+    AWS_PRECONDITION(out_metrics);
+    { /* BEGIN CRITICAL SECTION */
+        /* cast away const: taking the lock mutates internal state only; logically this is a read-only call */
+        s_lock_synced_data((struct aws_http2_stream_manager *)(void *)stream_manager);
+        size_t all_available_streams_num = 0;
+        all_available_streams_num +=
+            s_get_available_streams_num_from_connection_set(&stream_manager->synced_data.ideal_available_set);
+        all_available_streams_num +=
+            s_get_available_streams_num_from_connection_set(&stream_manager->synced_data.nonideal_available_set);
+        out_metrics->pending_concurrency_acquires =
+            stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION];
+        out_metrics->available_concurrency = all_available_streams_num;
+        out_metrics->leased_concurrency = stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_OPEN_STREAM];
+        s_unlock_synced_data((struct aws_http2_stream_manager *)(void *)stream_manager);
+    } /* END CRITICAL SECTION */
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/proxy_connection.c b/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
new file mode 100644
index 00000000000..e6cdb8a2460
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
@@ -0,0 +1,1658 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/proxy_impl.h>
+
+#include <aws/common/encoding.h>
+#include <aws/common/environment.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/string.h>
+#include <aws/http/connection_manager.h>
+#include <aws/http/private/connection_impl.h>
+#include <aws/http/proxy.h>
+#include <aws/http/request_response.h>
+#include <aws/io/channel.h>
+#include <aws/io/logging.h>
+#include <aws/io/tls_channel_handler.h>
+#include <aws/io/uri.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+# pragma warning(disable : 4232) /* function pointer to dll symbol */
+#endif
+
+AWS_STATIC_STRING_FROM_LITERAL(s_host_header_name, "Host");
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_connection_header_name, "Proxy-Connection");
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_connection_header_value, "Keep-Alive");
+AWS_STATIC_STRING_FROM_LITERAL(s_options_method, "OPTIONS");
+AWS_STATIC_STRING_FROM_LITERAL(s_star_path, "*");
+
+AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_env_var, "HTTP_PROXY");
+AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_env_var_low, "http_proxy");
+AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_env_var, "HTTPS_PROXY");
+AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_env_var_low, "https_proxy");
+
+#ifndef BYO_CRYPTO
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_no_verify_peer_env_var, "AWS_PROXY_NO_VERIFY_PEER");
+#endif
+
+static struct aws_http_proxy_system_vtable s_default_vtable = {
+ .setup_client_tls = &aws_channel_setup_client_tls,
+};
+
+static struct aws_http_proxy_system_vtable *s_vtable = &s_default_vtable;
+
+/* Test hook: overrides the proxy system vtable (e.g. to mock TLS setup). Call before any proxy connection is made. */
+void aws_http_proxy_system_set_vtable(struct aws_http_proxy_system_vtable *vtable) {
+    s_vtable = vtable;
+}
+
+/*
+ * Releases every resource owned by a proxy user data object. NULL-tolerant. Safe to call on a
+ * partially-constructed object from the creation error paths (all fields are zeroed on allocation).
+ */
+void aws_http_proxy_user_data_destroy(struct aws_http_proxy_user_data *user_data) {
+    if (user_data == NULL) {
+        return;
+    }
+    /* Safe even if the map was never initialized: the struct is zeroed at creation. */
+    aws_hash_table_clean_up(&user_data->alpn_string_map);
+
+    /*
+     * For tunneling connections, this is now internal and never surfaced to the user, so it's our responsibility
+     * to clean up the last reference.
+     *
+     * Check proxy_config for NULL before dereferencing it: this destroy runs on error paths where
+     * proxy_config may not have been created yet (the later `if (user_data->proxy_config)` shows it
+     * is legitimately nullable).
+     */
+    if (user_data->proxy_connection != NULL && user_data->proxy_config != NULL &&
+        user_data->proxy_config->connection_type == AWS_HPCT_HTTP_TUNNEL) {
+        aws_http_connection_release(user_data->proxy_connection);
+        user_data->proxy_connection = NULL;
+    }
+
+    aws_string_destroy(user_data->original_host);
+    if (user_data->proxy_config) {
+        aws_http_proxy_config_destroy(user_data->proxy_config);
+    }
+
+    if (user_data->original_tls_options) {
+        aws_tls_connection_options_clean_up(user_data->original_tls_options);
+        aws_mem_release(user_data->allocator, user_data->original_tls_options);
+    }
+
+    aws_http_proxy_negotiator_release(user_data->proxy_negotiator);
+
+    aws_client_bootstrap_release(user_data->original_bootstrap);
+
+    aws_mem_release(user_data->allocator, user_data);
+}
+
+/*
+ * Allocates and initializes the bookkeeping object that drives a proxied HTTP connection attempt.
+ *
+ * A single allocation holds both the user-data struct and a copy of the caller's HTTP/2 initial
+ * settings array. Exactly one of the http-level or channel-level setup/shutdown callback pairs must
+ * be provided (fatally asserted below). Returns NULL with last-error set on failure.
+ */
+struct aws_http_proxy_user_data *aws_http_proxy_user_data_new(
+    struct aws_allocator *allocator,
+    const struct aws_http_client_connection_options *orig_options,
+    aws_client_bootstrap_on_channel_event_fn *on_channel_setup,
+    aws_client_bootstrap_on_channel_event_fn *on_channel_shutdown) {
+
+    AWS_FATAL_ASSERT(orig_options->proxy_options != NULL);
+    /* make copy of options, and add defaults for missing optional structs */
+    struct aws_http_client_connection_options options = *orig_options;
+
+    struct aws_http1_connection_options default_http1_options;
+    AWS_ZERO_STRUCT(default_http1_options);
+    if (options.http1_options == NULL) {
+        options.http1_options = &default_http1_options;
+    }
+
+    struct aws_http2_connection_options default_http2_options;
+    AWS_ZERO_STRUCT(default_http2_options);
+    if (options.http2_options == NULL) {
+        options.http2_options = &default_http2_options;
+    }
+
+    struct aws_http2_setting *setting_array = NULL;
+    struct aws_http_proxy_user_data *user_data = NULL;
+    /*
+     * Allocate from `allocator` (not options.allocator): aws_http_proxy_user_data_destroy() releases
+     * this memory through user_data->allocator (set to `allocator` below), so acquisition and release
+     * must go through the same allocator.
+     */
+    aws_mem_acquire_many(
+        allocator,
+        2,
+        &user_data,
+        sizeof(struct aws_http_proxy_user_data),
+        &setting_array,
+        options.http2_options->num_initial_settings * sizeof(struct aws_http2_setting));
+    AWS_ZERO_STRUCT(*user_data);
+
+    user_data->allocator = allocator;
+    user_data->state = AWS_PBS_SOCKET_CONNECT;
+    user_data->error_code = AWS_ERROR_SUCCESS;
+    user_data->connect_status_code = AWS_HTTP_STATUS_CODE_UNKNOWN;
+    user_data->original_bootstrap = aws_client_bootstrap_acquire(options.bootstrap);
+    if (options.socket_options != NULL) {
+        user_data->original_socket_options = *options.socket_options;
+    }
+    user_data->original_manual_window_management = options.manual_window_management;
+    user_data->original_initial_window_size = options.initial_window_size;
+
+    user_data->original_host = aws_string_new_from_cursor(allocator, &options.host_name);
+    if (user_data->original_host == NULL) {
+        goto on_error;
+    }
+
+    user_data->original_port = options.port;
+
+    user_data->proxy_config = aws_http_proxy_config_new_from_connection_options(allocator, &options);
+    if (user_data->proxy_config == NULL) {
+        goto on_error;
+    }
+
+    user_data->proxy_negotiator =
+        aws_http_proxy_strategy_create_negotiator(user_data->proxy_config->proxy_strategy, allocator);
+    if (user_data->proxy_negotiator == NULL) {
+        goto on_error;
+    }
+
+    if (options.tls_options) {
+        /* clone tls options, but redirect user data to what we're creating */
+        user_data->original_tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options));
+        if (user_data->original_tls_options == NULL ||
+            aws_tls_connection_options_copy(user_data->original_tls_options, options.tls_options)) {
+            goto on_error;
+        }
+
+        user_data->original_tls_options->user_data = user_data;
+    }
+
+    /* use `allocator` here too, for consistency with the rest of this object's memory */
+    if (aws_http_alpn_map_init_copy(allocator, &user_data->alpn_string_map, options.alpn_string_map)) {
+        goto on_error;
+    }
+
+    user_data->original_http_on_setup = options.on_setup;
+    user_data->original_http_on_shutdown = options.on_shutdown;
+    user_data->original_channel_on_setup = on_channel_setup;
+    user_data->original_channel_on_shutdown = on_channel_shutdown;
+    user_data->requested_event_loop = options.requested_event_loop;
+    user_data->prior_knowledge_http2 = options.prior_knowledge_http2;
+
+    /* one and only one setup callback must be valid */
+    AWS_FATAL_ASSERT((user_data->original_http_on_setup == NULL) != (user_data->original_channel_on_setup == NULL));
+
+    /* one and only one shutdown callback must be valid */
+    AWS_FATAL_ASSERT(
+        (user_data->original_http_on_shutdown == NULL) != (user_data->original_channel_on_shutdown == NULL));
+
+    /* callback set must be self-consistent. Technically the second check is redundant given the previous checks */
+    AWS_FATAL_ASSERT((user_data->original_http_on_setup == NULL) == (user_data->original_http_on_shutdown == NULL));
+    AWS_FATAL_ASSERT(
+        (user_data->original_channel_on_setup == NULL) == (user_data->original_channel_on_shutdown == NULL));
+
+    user_data->original_user_data = options.user_data;
+    user_data->original_http1_options = *options.http1_options;
+    user_data->original_http2_options = *options.http2_options;
+
+    /* keep a copy of the settings array if it's not NULL */
+    if (options.http2_options->num_initial_settings > 0) {
+        memcpy(
+            setting_array,
+            options.http2_options->initial_settings_array,
+            options.http2_options->num_initial_settings * sizeof(struct aws_http2_setting));
+        user_data->original_http2_options.initial_settings_array = setting_array;
+    }
+
+    return user_data;
+
+on_error:
+
+    AWS_LOGF_ERROR(
+        AWS_LS_HTTP_CONNECTION,
+        "(STATIC) Proxy connection failed to create user data with error %d(%s)",
+        aws_last_error(),
+        aws_error_str(aws_last_error()));
+
+    aws_http_proxy_user_data_destroy(user_data);
+
+    return NULL;
+}
+
+/*
+ * Creates a fresh proxy user-data object seeded from an existing one, for retrying a proxy CONNECT on
+ * a brand new connection (e.g. after a 407 with an AWS_HPNRD_NEW_CONNECTION retry directive). All
+ * "original_*" settings are copied; negotiation state is reset to AWS_PBS_SOCKET_CONNECT.
+ * Returns NULL with last-error set on failure.
+ */
+struct aws_http_proxy_user_data *aws_http_proxy_user_data_new_reset_clone(
+    struct aws_allocator *allocator,
+    struct aws_http_proxy_user_data *old_user_data) {
+
+    AWS_FATAL_ASSERT(old_user_data != NULL);
+
+    /* single allocation holds the struct plus a copy of the HTTP/2 initial settings array */
+    struct aws_http2_setting *setting_array = NULL;
+    struct aws_http_proxy_user_data *user_data = NULL;
+    aws_mem_acquire_many(
+        allocator,
+        2,
+        &user_data,
+        sizeof(struct aws_http_proxy_user_data),
+        &setting_array,
+        old_user_data->original_http2_options.num_initial_settings * sizeof(struct aws_http2_setting));
+
+    AWS_ZERO_STRUCT(*user_data);
+    user_data->allocator = allocator;
+    user_data->state = AWS_PBS_SOCKET_CONNECT;
+    user_data->error_code = AWS_ERROR_SUCCESS;
+    user_data->connect_status_code = AWS_HTTP_STATUS_CODE_UNKNOWN;
+    user_data->original_bootstrap = aws_client_bootstrap_acquire(old_user_data->original_bootstrap);
+    user_data->original_socket_options = old_user_data->original_socket_options;
+    user_data->original_manual_window_management = old_user_data->original_manual_window_management;
+    user_data->original_initial_window_size = old_user_data->original_initial_window_size;
+    user_data->prior_knowledge_http2 = old_user_data->prior_knowledge_http2;
+
+    user_data->original_host = aws_string_new_from_string(allocator, old_user_data->original_host);
+    if (user_data->original_host == NULL) {
+        goto on_error;
+    }
+
+    user_data->original_port = old_user_data->original_port;
+
+    user_data->proxy_config = aws_http_proxy_config_new_clone(allocator, old_user_data->proxy_config);
+    if (user_data->proxy_config == NULL) {
+        goto on_error;
+    }
+
+    /* the negotiator is shared (ref-counted), not cloned, so auth retry state carries over */
+    user_data->proxy_negotiator = aws_http_proxy_negotiator_acquire(old_user_data->proxy_negotiator);
+    if (user_data->proxy_negotiator == NULL) {
+        goto on_error;
+    }
+
+    if (old_user_data->original_tls_options) {
+        /* clone tls options, but redirect user data to what we're creating */
+        user_data->original_tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options));
+        if (user_data->original_tls_options == NULL ||
+            aws_tls_connection_options_copy(user_data->original_tls_options, old_user_data->original_tls_options)) {
+            goto on_error;
+        }
+
+        user_data->original_tls_options->user_data = user_data;
+    }
+
+    if (aws_http_alpn_map_init_copy(allocator, &user_data->alpn_string_map, &old_user_data->alpn_string_map)) {
+        goto on_error;
+    }
+
+    user_data->original_http_on_setup = old_user_data->original_http_on_setup;
+    user_data->original_http_on_shutdown = old_user_data->original_http_on_shutdown;
+    user_data->original_channel_on_setup = old_user_data->original_channel_on_setup;
+    user_data->original_channel_on_shutdown = old_user_data->original_channel_on_shutdown;
+    user_data->original_user_data = old_user_data->original_user_data;
+    user_data->original_http1_options = old_user_data->original_http1_options;
+    user_data->original_http2_options = old_user_data->original_http2_options;
+
+    /* keep a copy of the settings array if it's not NULL */
+    if (old_user_data->original_http2_options.num_initial_settings > 0) {
+        memcpy(
+            setting_array,
+            old_user_data->original_http2_options.initial_settings_array,
+            old_user_data->original_http2_options.num_initial_settings * sizeof(struct aws_http2_setting));
+        user_data->original_http2_options.initial_settings_array = setting_array;
+    }
+
+    return user_data;
+
+on_error:
+
+    AWS_LOGF_ERROR(
+        AWS_LS_HTTP_CONNECTION,
+        "(STATIC) Proxy connection failed to create user data with error %d(%s)",
+        aws_last_error(),
+        aws_error_str(aws_last_error()));
+
+    aws_http_proxy_user_data_destroy(user_data);
+
+    return NULL;
+}
+
+/*
+ * Examines the proxy user data state and determines whether to make an http-interface setup callback
+ * or a raw channel setup callback. Each callback pointer is cleared after use so it fires at most once.
+ */
+static void s_do_on_setup_callback(
+    struct aws_http_proxy_user_data *proxy_ud,
+    struct aws_http_connection *connection,
+    int error_code) {
+
+    if (proxy_ud->original_http_on_setup != NULL) {
+        proxy_ud->original_http_on_setup(connection, error_code, proxy_ud->original_user_data);
+        proxy_ud->original_http_on_setup = NULL;
+    }
+
+    if (proxy_ud->original_channel_on_setup != NULL) {
+        struct aws_channel *channel = (connection != NULL) ? aws_http_connection_get_channel(connection) : NULL;
+        proxy_ud->original_channel_on_setup(
+            proxy_ud->original_bootstrap, error_code, channel, proxy_ud->original_user_data);
+        proxy_ud->original_channel_on_setup = NULL;
+    }
+}
+
+/*
+ * Examines the proxy user data state and determines whether to make an http-interface shutdown callback
+ * or a raw channel shutdown callback
+ */
+static void s_do_on_shutdown_callback(struct aws_http_proxy_user_data *proxy_ud, int error_code) {
+    AWS_FATAL_ASSERT(proxy_ud->proxy_connection);
+
+    if (proxy_ud->original_http_on_shutdown) {
+        AWS_FATAL_ASSERT(proxy_ud->final_connection);
+        proxy_ud->original_http_on_shutdown(proxy_ud->final_connection, error_code, proxy_ud->original_user_data);
+        /* cleared so the callback fires at most once */
+        proxy_ud->original_http_on_shutdown = NULL;
+    }
+
+    if (proxy_ud->original_channel_on_shutdown) {
+        struct aws_channel *channel = aws_http_connection_get_channel(proxy_ud->proxy_connection);
+        proxy_ud->original_channel_on_shutdown(
+            proxy_ud->original_bootstrap, error_code, channel, proxy_ud->original_user_data);
+        /* cleared so the callback fires at most once */
+        proxy_ud->original_channel_on_shutdown = NULL;
+    }
+}
+
+/*
+ * Connection callback used ONLY by forwarding http proxy connections. After this,
+ * the connection is live and the user is notified
+ */
+static void s_aws_http_on_client_connection_http_forwarding_proxy_setup_fn(
+    struct aws_http_connection *connection,
+    int error_code,
+    void *user_data) {
+    struct aws_http_proxy_user_data *proxy_ud = user_data;
+
+    /* Notify the user first; on failure there is nothing left worth keeping alive. */
+    s_do_on_setup_callback(proxy_ud, connection, error_code);
+
+    if (error_code == AWS_ERROR_SUCCESS) {
+        /*
+         * The proxy connection and final connection are the same in forwarding proxy connections. This lets
+         * us unconditionally use fatal asserts on these being non-null regardless of proxy configuration.
+         */
+        proxy_ud->final_connection = connection;
+        proxy_ud->proxy_connection = connection;
+        proxy_ud->state = AWS_PBS_SUCCESS;
+    } else {
+        aws_http_proxy_user_data_destroy(user_data);
+    }
+}
+
+/*
+ * Connection shutdown callback used by both http and https proxy connections. Only invokes
+ * user shutdown if the connection was successfully established. Otherwise, it invokes
+ * the user setup function with an error.
+ */
+static void s_aws_http_on_client_connection_http_proxy_shutdown_fn(
+    struct aws_http_connection *connection,
+    int error_code,
+    void *user_data) {
+
+    struct aws_http_proxy_user_data *proxy_ud = user_data;
+
+    if (proxy_ud->state != AWS_PBS_SUCCESS) {
+        /* Setup never completed: surface the most specific error available via the setup callback. */
+        int final_error = error_code;
+        if (final_error == AWS_ERROR_SUCCESS) {
+            final_error = proxy_ud->error_code;
+        }
+        if (final_error == AWS_ERROR_SUCCESS) {
+            final_error = AWS_ERROR_UNKNOWN;
+        }
+
+        AWS_LOGF_WARN(
+            AWS_LS_HTTP_CONNECTION,
+            "(%p) Error %d while connecting to \"%s\" via proxy.",
+            (void *)connection,
+            final_error,
+            (char *)proxy_ud->original_host->bytes);
+
+        s_do_on_setup_callback(proxy_ud, NULL, final_error);
+    } else {
+        AWS_LOGF_INFO(
+            AWS_LS_HTTP_CONNECTION,
+            "(%p) Proxy connection (channel %p) shutting down.",
+            (void *)connection,
+            (void *)aws_http_connection_get_channel(connection));
+        s_do_on_shutdown_callback(proxy_ud, error_code);
+    }
+
+    aws_http_proxy_user_data_destroy(user_data);
+}
+
+/*
+ * On-any-error entry point that releases all resources involved in establishing the proxy connection.
+ * This must not be invoked any time after a successful setup callback.
+ */
+static void s_aws_http_proxy_user_data_shutdown(struct aws_http_proxy_user_data *user_data) {
+
+    user_data->state = AWS_PBS_FAILURE;
+
+    /* No proxy connection yet: fail the setup callback directly and free everything now. */
+    if (user_data->proxy_connection == NULL) {
+        s_do_on_setup_callback(user_data, NULL, user_data->error_code);
+        aws_http_proxy_user_data_destroy(user_data);
+        return;
+    }
+
+    /* Release the in-flight CONNECT request state, if any, before shutting the channel down. */
+    if (user_data->connect_stream) {
+        aws_http_stream_release(user_data->connect_stream);
+        user_data->connect_stream = NULL;
+    }
+
+    if (user_data->connect_request) {
+        aws_http_message_destroy(user_data->connect_request);
+        user_data->connect_request = NULL;
+    }
+
+    /* Shutting the channel down will eventually fire the shutdown callback, which destroys user_data. */
+    struct aws_http_connection *http_connection = user_data->proxy_connection;
+    user_data->proxy_connection = NULL;
+
+    aws_channel_shutdown(http_connection->channel_slot->channel, user_data->error_code);
+    aws_http_connection_release(http_connection);
+}
+
+/*
+ * Builds an HTTP/1.1 CONNECT request of the form "CONNECT host:port" with a matching Host header and
+ * "Proxy-Connection: Keep-Alive". Returns NULL with last-error set on failure.
+ */
+static struct aws_http_message *s_build_h1_proxy_connect_request(struct aws_http_proxy_user_data *user_data) {
+    struct aws_http_message *request = aws_http_message_new_request(user_data->allocator);
+    if (request == NULL) {
+        return NULL;
+    }
+
+    struct aws_byte_buf path_buffer;
+    AWS_ZERO_STRUCT(path_buffer);
+
+    if (aws_http_message_set_request_method(request, aws_http_method_connect)) {
+        goto on_error;
+    }
+
+    /* +10 leaves room for the ":" plus the decimal port appended below */
+    if (aws_byte_buf_init(&path_buffer, user_data->allocator, user_data->original_host->len + 10)) {
+        goto on_error;
+    }
+
+    struct aws_byte_cursor host_cursor = aws_byte_cursor_from_string(user_data->original_host);
+    if (aws_byte_buf_append(&path_buffer, &host_cursor)) {
+        goto on_error;
+    }
+
+    struct aws_byte_cursor colon_cursor = aws_byte_cursor_from_c_str(":");
+    if (aws_byte_buf_append(&path_buffer, &colon_cursor)) {
+        goto on_error;
+    }
+
+    char port_str[20] = "\0";
+    snprintf(port_str, sizeof(port_str), "%d", (int)user_data->original_port);
+    struct aws_byte_cursor port_cursor = aws_byte_cursor_from_c_str(port_str);
+    if (aws_byte_buf_append(&path_buffer, &port_cursor)) {
+        goto on_error;
+    }
+
+    /* the request path is the "host:port" authority we just assembled */
+    struct aws_byte_cursor path_cursor = aws_byte_cursor_from_array(path_buffer.buffer, path_buffer.len);
+    if (aws_http_message_set_request_path(request, path_cursor)) {
+        goto on_error;
+    }
+
+    struct aws_http_header host_header = {
+        .name = aws_byte_cursor_from_string(s_host_header_name),
+        .value = aws_byte_cursor_from_array(path_buffer.buffer, path_buffer.len),
+    };
+    if (aws_http_message_add_header(request, host_header)) {
+        goto on_error;
+    }
+
+    struct aws_http_header keep_alive_header = {
+        .name = aws_byte_cursor_from_string(s_proxy_connection_header_name),
+        .value = aws_byte_cursor_from_string(s_proxy_connection_header_value),
+    };
+    if (aws_http_message_add_header(request, keep_alive_header)) {
+        goto on_error;
+    }
+
+    aws_byte_buf_clean_up(&path_buffer);
+
+    return request;
+
+on_error:
+
+    AWS_LOGF_ERROR(
+        AWS_LS_HTTP_CONNECTION,
+        "(%p) TLS proxy connection failed to build CONNECT request with error %d(%s)",
+        (void *)user_data->proxy_connection,
+        aws_last_error(),
+        aws_error_str(aws_last_error()));
+
+    aws_byte_buf_clean_up(&path_buffer);
+    aws_http_message_destroy(request);
+
+    return NULL;
+}
+
+/*
+ * Builds the CONNECT request issued after proxy connection establishment, during the creation of
+ * tls-enabled proxy connections. Only HTTP/1.1 proxy connections are supported for tunneling.
+ */
+static struct aws_http_message *s_build_proxy_connect_request(struct aws_http_proxy_user_data *user_data) {
+    if (user_data->proxy_connection->http_version == AWS_HTTP_VERSION_1_1) {
+        return s_build_h1_proxy_connect_request(user_data);
+    }
+
+    aws_raise_error(AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL);
+    return NULL;
+}
+
+/*
+ * Body callback for the CONNECT request made during tunneling proxy connections: forwards the bytes
+ * to the negotiator (when it registered interest) and re-opens the stream window so the response can
+ * keep flowing.
+ */
+static int s_aws_http_on_incoming_body_tunnel_proxy(
+    struct aws_http_stream *stream,
+    const struct aws_byte_cursor *data,
+    void *user_data) {
+    /* note: the previous `(void)stream;` suppression was stale — `stream` is used below */
+
+    struct aws_http_proxy_user_data *context = user_data;
+    aws_http_proxy_negotiator_connect_on_incoming_body_fn *on_incoming_body =
+        context->proxy_negotiator->strategy_vtable.tunnelling_vtable->on_incoming_body_callback;
+    if (on_incoming_body != NULL) {
+        (*on_incoming_body)(context->proxy_negotiator, data);
+    }
+
+    /* grant the window back so CONNECT response data is never flow-control stalled */
+    aws_http_stream_update_window(stream, data->len);
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Forwards CONNECT response headers to the negotiator, when it has opted in to receive them. */
+static int s_aws_http_on_response_headers_tunnel_proxy(
+    struct aws_http_stream *stream,
+    enum aws_http_header_block header_block,
+    const struct aws_http_header *header_array,
+    size_t num_headers,
+    void *user_data) {
+    (void)stream;
+
+    struct aws_http_proxy_user_data *context = user_data;
+    aws_http_proxy_negotiation_connect_on_incoming_headers_fn *header_callback =
+        context->proxy_negotiator->strategy_vtable.tunnelling_vtable->on_incoming_headers_callback;
+    if (header_callback != NULL) {
+        header_callback(context->proxy_negotiator, header_block, header_array, num_headers);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/*
+ * Headers done callback for the CONNECT request made during tls proxy connections
+ */
+static int s_aws_http_on_incoming_header_block_done_tunnel_proxy(
+    struct aws_http_stream *stream,
+    enum aws_http_header_block header_block,
+    void *user_data) {
+
+    struct aws_http_proxy_user_data *context = user_data;
+
+    /* only the main header block carries the CONNECT status code we need to inspect */
+    if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) {
+        int status_code = AWS_HTTP_STATUS_CODE_UNKNOWN;
+        aws_http_stream_get_incoming_response_status(stream, &status_code);
+        context->connect_status_code = (enum aws_http_status_code)status_code;
+        /* anything other than 200 means the tunnel was refused; remember the failure for stream-complete */
+        if (context->connect_status_code != AWS_HTTP_STATUS_CODE_200_OK) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_CONNECTION,
+                "(%p) Proxy CONNECT request failed with status code %d",
+                (void *)context->proxy_connection,
+                context->connect_status_code);
+            context->error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED;
+        }
+
+        /* let the negotiator observe the status (e.g. to prepare an auth retry) */
+        aws_http_proxy_negotiator_connect_status_fn *on_status =
+            context->proxy_negotiator->strategy_vtable.tunnelling_vtable->on_status_callback;
+        if (on_status != NULL) {
+            (*on_status)(context->proxy_negotiator, context->connect_status_code);
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/*
+ * Wraps the already-established (and possibly TLS-secured) proxied channel in a new HTTP connection
+ * object for the user. Only used when the user supplied http-level callbacks. On success the new
+ * connection is stored in context->final_connection; on failure returns AWS_OP_ERR with last-error set.
+ */
+static int s_aws_http_apply_http_connection_to_proxied_channel(struct aws_http_proxy_user_data *context) {
+    AWS_FATAL_ASSERT(context->proxy_connection != NULL);
+    AWS_FATAL_ASSERT(context->original_http_on_setup != NULL);
+
+    struct aws_channel *channel = aws_http_connection_get_channel(context->proxy_connection);
+
+    struct aws_http_connection *connection = aws_http_connection_new_channel_handler(
+        context->allocator,
+        channel,
+        false,
+        context->original_tls_options != NULL,
+        context->original_manual_window_management,
+        context->prior_knowledge_http2,
+        context->original_initial_window_size,
+        context->alpn_string_map.p_impl == NULL ? NULL : &context->alpn_string_map,
+        &context->original_http1_options,
+        &context->original_http2_options,
+        context->original_user_data);
+    if (connection == NULL) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "static: Failed to create the client connection object, error %d (%s).",
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+
+        return AWS_OP_ERR;
+    }
+
+    AWS_LOGF_INFO(
+        AWS_LS_HTTP_CONNECTION,
+        "id=%p: " PRInSTR " client connection established.",
+        (void *)connection,
+        AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(connection->http_version)));
+
+    context->final_connection = connection;
+
+    return AWS_OP_SUCCESS;
+}
+
+/*
+ * Final step of tunnel establishment: either bind a fresh HTTP connection object to the channel
+ * (http-level callbacks) or hand the raw channel back (channel-level callbacks), then mark the
+ * whole attempt successful.
+ */
+static void s_do_final_proxied_channel_setup(struct aws_http_proxy_user_data *proxy_ud) {
+    if (proxy_ud->original_http_on_setup != NULL) {
+        /*
+         * If we're transitioning to http with http setup/shutdown callbacks, try to apply a new http connection to
+         * the channel
+         */
+        if (s_aws_http_apply_http_connection_to_proxied_channel(proxy_ud)) {
+            proxy_ud->error_code = aws_last_error();
+            s_aws_http_proxy_user_data_shutdown(proxy_ud);
+            return;
+        }
+
+        s_do_on_setup_callback(proxy_ud, proxy_ud->final_connection, AWS_ERROR_SUCCESS);
+    } else {
+        /*
+         * Otherwise invoke setup directly (which will end up being channel setup)
+         */
+        s_do_on_setup_callback(proxy_ud, proxy_ud->proxy_connection, AWS_ERROR_SUCCESS);
+    }
+
+    /* Tell user of successful connection. */
+    proxy_ud->state = AWS_PBS_SUCCESS;
+}
+
+/*
+ * Tls negotiation callback for tls proxy connections: on success, finish building the proxied
+ * channel; on failure, record the error and tear the whole connection attempt down.
+ */
+static void s_on_origin_server_tls_negotation_result(
+    struct aws_channel_handler *handler,
+    struct aws_channel_slot *slot,
+    int error_code,
+    void *user_data) {
+
+    (void)handler;
+    (void)slot;
+
+    struct aws_http_proxy_user_data *context = user_data;
+
+    if (error_code == AWS_ERROR_SUCCESS) {
+        s_do_final_proxied_channel_setup(context);
+        return;
+    }
+
+    AWS_LOGF_ERROR(
+        AWS_LS_HTTP_CONNECTION,
+        "(%p) Proxy connection failed origin server TLS negotiation with error %d(%s)",
+        (void *)context->proxy_connection,
+        error_code,
+        aws_error_str(error_code));
+    context->error_code = error_code;
+    s_aws_http_proxy_user_data_shutdown(context);
+}
+
+static int s_create_tunneling_connection(struct aws_http_proxy_user_data *user_data);
+static int s_make_proxy_connect_request(struct aws_http_proxy_user_data *user_data);
+
+/* Drops every user-facing callback so an abandoned connection attempt shuts down without notifying anyone. */
+static void s_zero_callbacks(struct aws_http_proxy_user_data *proxy_ud) {
+    proxy_ud->original_http_on_setup = NULL;
+    proxy_ud->original_http_on_shutdown = NULL;
+    proxy_ud->original_channel_on_setup = NULL;
+    proxy_ud->original_channel_on_shutdown = NULL;
+}
+
+/*
+ * Stream done callback for the CONNECT request made during tls proxy connections
+ */
+static void s_aws_http_on_stream_complete_tunnel_proxy(
+    struct aws_http_stream *stream,
+    int error_code,
+    void *user_data) {
+    struct aws_http_proxy_user_data *context = user_data;
+    AWS_FATAL_ASSERT(stream == context->connect_stream);
+
+    /* keep the first recorded error (e.g. a non-200 status) over a later stream error */
+    if (context->error_code == AWS_ERROR_SUCCESS && error_code != AWS_ERROR_SUCCESS) {
+        context->error_code = error_code;
+    }
+
+    if (context->error_code != AWS_ERROR_SUCCESS) {
+        context->error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED;
+        /* on 407, ask the negotiator whether (and how) the CONNECT should be retried */
+        if (context->connect_status_code == AWS_HTTP_STATUS_CODE_407_PROXY_AUTHENTICATION_REQUIRED) {
+            enum aws_http_proxy_negotiation_retry_directive retry_directive =
+                aws_http_proxy_negotiator_get_retry_directive(context->proxy_negotiator);
+
+            if (retry_directive == AWS_HPNRD_NEW_CONNECTION) {
+                struct aws_http_proxy_user_data *new_context =
+                    aws_http_proxy_user_data_new_reset_clone(context->allocator, context);
+                if (new_context != NULL && s_create_tunneling_connection(new_context) == AWS_OP_SUCCESS) {
+                    /*
+                     * We successfully kicked off a new connection. By NULLing the callbacks on the old one, we can
+                     * shut it down quietly without the user being notified. The new connection will notify the user
+                     * based on its success or failure.
+                     */
+                    s_zero_callbacks(context);
+                    context->error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED_RETRYABLE;
+                }
+            } else if (retry_directive == AWS_HPNRD_CURRENT_CONNECTION) {
+                context->error_code = AWS_ERROR_SUCCESS;
+                /* retry on the same connection; on success this callback will fire again for the new stream */
+                if (s_make_proxy_connect_request(context) == AWS_OP_SUCCESS) {
+                    return;
+                }
+            }
+        }
+
+        s_aws_http_proxy_user_data_shutdown(context);
+        return;
+    }
+
+    AWS_LOGF_INFO(
+        AWS_LS_HTTP_CONNECTION,
+        "(%p) Proxy connection made successful CONNECT request to \"%s\" via proxy",
+        (void *)context->proxy_connection,
+        context->original_host->bytes);
+
+    /*
+     * We're finished with these, let's release
+     */
+    aws_http_stream_release(stream);
+    context->connect_stream = NULL;
+    aws_http_message_destroy(context->connect_request);
+    context->connect_request = NULL;
+
+    AWS_LOGF_INFO(
+        AWS_LS_HTTP_CONNECTION, "(%p) Beginning TLS negotiation through proxy", (void *)context->proxy_connection);
+
+    if (context->original_tls_options != NULL) {
+        /*
+         * Perform TLS negotiation to the origin server through proxy
+         */
+        context->original_tls_options->on_negotiation_result = s_on_origin_server_tls_negotation_result;
+
+        context->state = AWS_PBS_TLS_NEGOTIATION;
+        struct aws_channel *channel = aws_http_connection_get_channel(context->proxy_connection);
+
+        /* the TLS handler for the origin server goes at the far-right (application) end of the channel */
+        struct aws_channel_slot *last_slot = aws_channel_get_first_slot(channel);
+        while (last_slot->adj_right != NULL) {
+            last_slot = last_slot->adj_right;
+        }
+
+        if (s_vtable->setup_client_tls(last_slot, context->original_tls_options)) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_CONNECTION,
+                "(%p) Proxy connection failed to start TLS negotiation with error %d(%s)",
+                (void *)context->proxy_connection,
+                aws_last_error(),
+                aws_error_str(aws_last_error()));
+            s_aws_http_proxy_user_data_shutdown(context);
+            return;
+        }
+    } else {
+        s_do_final_proxied_channel_setup(context);
+    }
+}
+
+/*
+ * Failure callback handed to the negotiator's CONNECT request transform:
+ * the request stream could not be created, so record the error and shut
+ * the proxy connection attempt down.
+ */
+static void s_terminate_tunneling_connect(
+    struct aws_http_message *message,
+    int error_code,
+    void *internal_proxy_user_data) {
+    (void)message;
+
+    struct aws_http_proxy_user_data *proxy_ud = internal_proxy_user_data;
+
+    AWS_LOGF_ERROR(
+        AWS_LS_HTTP_CONNECTION,
+        "(%p) Tunneling proxy connection failed to create request stream for CONNECT request with error %d(%s)",
+        (void *)proxy_ud->proxy_connection,
+        error_code,
+        aws_error_str(error_code));
+
+    proxy_ud->error_code = error_code;
+    s_aws_http_proxy_user_data_shutdown(proxy_ud);
+}
+
+/*
+ * Success callback handed to the negotiator's CONNECT request transform:
+ * submits the (possibly mutated) CONNECT request on the proxy connection.
+ * Any failure along the way shuts the proxy connection attempt down.
+ */
+static void s_continue_tunneling_connect(struct aws_http_message *message, void *internal_proxy_user_data) {
+    struct aws_http_proxy_user_data *proxy_ud = internal_proxy_user_data;
+
+    struct aws_http_make_request_options request_options = {
+        .self_size = sizeof(request_options),
+        .request = message,
+        .user_data = proxy_ud,
+        .on_response_headers = s_aws_http_on_response_headers_tunnel_proxy,
+        .on_response_header_block_done = s_aws_http_on_incoming_header_block_done_tunnel_proxy,
+        .on_response_body = s_aws_http_on_incoming_body_tunnel_proxy,
+        .on_complete = s_aws_http_on_stream_complete_tunnel_proxy,
+    };
+
+    /* A CONNECT retry on the same connection may leave a stale stream behind */
+    if (proxy_ud->connect_stream != NULL) {
+        aws_http_stream_release(proxy_ud->connect_stream);
+    }
+
+    proxy_ud->connect_stream = aws_http_connection_make_request(proxy_ud->proxy_connection, &request_options);
+    if (proxy_ud->connect_stream == NULL) {
+        goto on_error;
+    }
+
+    /* Bug fix: aws_http_stream_activate() can fail; its result was previously
+     * ignored, which would leave the CONNECT exchange stalled with no shutdown
+     * triggered and no error surfaced to the user. */
+    if (aws_http_stream_activate(proxy_ud->connect_stream)) {
+        goto on_error;
+    }
+
+    return;
+
+on_error:
+
+    s_aws_http_proxy_user_data_shutdown(proxy_ud);
+}
+
+/*
+ * Issues a CONNECT request on an http connection
+ */
+static int s_make_proxy_connect_request(struct aws_http_proxy_user_data *user_data) {
+    /* Drop any request left over from a previous (failed or retried) attempt */
+    if (user_data->connect_request != NULL) {
+        aws_http_message_destroy(user_data->connect_request);
+        user_data->connect_request = NULL;
+    }
+
+    user_data->connect_request = s_build_proxy_connect_request(user_data);
+    if (user_data->connect_request == NULL) {
+        return AWS_OP_ERR;
+    }
+
+    /* Hand the request to the negotiator; it will invoke either the terminate
+     * or the continue callback (possibly asynchronously). */
+    (*user_data->proxy_negotiator->strategy_vtable.tunnelling_vtable->connect_request_transform)(
+        user_data->proxy_negotiator,
+        user_data->connect_request,
+        s_terminate_tunneling_connect,
+        s_continue_tunneling_connect,
+        user_data);
+
+    return AWS_OP_SUCCESS;
+}
+
+/*
+ * Connection setup callback for tunneling proxy connections: on success,
+ * records the proxy connection and kicks off the CONNECT request; on any
+ * failure, shuts the proxy connection attempt down.
+ */
+static void s_aws_http_on_client_connection_http_tunneling_proxy_setup_fn(
+    struct aws_http_connection *connection,
+    int error_code,
+    void *user_data) {
+
+    struct aws_http_proxy_user_data *proxy_ud = user_data;
+
+    proxy_ud->error_code = error_code;
+    if (error_code != AWS_ERROR_SUCCESS) {
+        goto on_error;
+    }
+
+    /* Bug fix: record the connection before logging; previously the log ran
+     * first and printed the still-unset proxy_connection field instead of the
+     * actual connection pointer. */
+    proxy_ud->proxy_connection = connection;
+    proxy_ud->state = AWS_PBS_HTTP_CONNECT;
+
+    AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION, "(%p) Making CONNECT request to proxy", (void *)proxy_ud->proxy_connection);
+
+    if (s_make_proxy_connect_request(proxy_ud)) {
+        goto on_error;
+    }
+
+    return;
+
+on_error:
+
+    s_aws_http_proxy_user_data_shutdown(proxy_ud);
+}
+
+/*
+ * Checks for the special case when a request is an OPTIONS request with *
+ * path and no query params
+ */
+static bool s_is_star_path_options_method(const struct aws_http_message *request) {
+    struct aws_byte_cursor method_cursor;
+    if (aws_http_message_get_request_method(request, &method_cursor)) {
+        /* Unreadable method: treat as "not the special case" rather than error */
+        return false;
+    }
+
+    struct aws_byte_cursor options_cursor = aws_byte_cursor_from_string(s_options_method);
+    if (!aws_byte_cursor_eq_ignore_case(&method_cursor, &options_cursor)) {
+        return false;
+    }
+
+    struct aws_byte_cursor path_cursor;
+    if (aws_http_message_get_request_path(request, &path_cursor)) {
+        return false;
+    }
+
+    struct aws_byte_cursor star_cursor = aws_byte_cursor_from_string(s_star_path);
+    if (!aws_byte_cursor_eq_ignore_case(&path_cursor, &star_cursor)) {
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Modifies a requests uri by transforming it to absolute form according to
+ * section 5.3.2 of rfc 7230
+ *
+ * We do this by parsing the existing uri and then rebuilding it as an
+ * absolute resource path (using the original connection options)
+ */
+int aws_http_rewrite_uri_for_proxy_request(
+    struct aws_http_message *request,
+    struct aws_http_proxy_user_data *proxy_user_data) {
+    int result = AWS_OP_ERR;
+
+    struct aws_uri target_uri;
+    AWS_ZERO_STRUCT(target_uri);
+
+    /* Bug fix: declare and zero 'uri' before the first 'goto done' so the
+     * cleanup label never calls aws_uri_clean_up() on an uninitialized struct
+     * (the early exit below previously jumped over its initialization, which
+     * is undefined behavior). */
+    struct aws_uri uri;
+    AWS_ZERO_STRUCT(uri);
+
+    struct aws_byte_cursor path_cursor;
+    AWS_ZERO_STRUCT(path_cursor);
+
+    if (aws_http_message_get_request_path(request, &path_cursor)) {
+        goto done;
+    }
+
+    /* Pull out the original path/query */
+    if (aws_uri_init_parse(&uri, proxy_user_data->allocator, &path_cursor)) {
+        goto done;
+    }
+
+    const struct aws_byte_cursor *actual_path_cursor = aws_uri_path(&uri);
+    const struct aws_byte_cursor *actual_query_cursor = aws_uri_query_string(&uri);
+
+    /* now rebuild the uri with scheme, host and port subbed in from the original connection options */
+    struct aws_uri_builder_options target_uri_builder;
+    AWS_ZERO_STRUCT(target_uri_builder);
+    target_uri_builder.scheme = aws_http_scheme_http;
+    target_uri_builder.path = *actual_path_cursor;
+    target_uri_builder.host_name = aws_byte_cursor_from_string(proxy_user_data->original_host);
+    target_uri_builder.port = proxy_user_data->original_port;
+    target_uri_builder.query_string = *actual_query_cursor;
+
+    if (aws_uri_init_from_builder_options(&target_uri, proxy_user_data->allocator, &target_uri_builder)) {
+        goto done;
+    }
+
+    struct aws_byte_cursor full_target_uri =
+        aws_byte_cursor_from_array(target_uri.uri_str.buffer, target_uri.uri_str.len);
+
+    /*
+     * By rfc 7230, Section 5.3.4, a star-pathed options request made through a proxy MUST be transformed (at the last
+     * proxy) back into a star-pathed request if the proxy request has an empty path and no query string. This
+     * is behavior we want to support. So from our side, we need to make sure that star-pathed options requests
+     * get translated into options requests with the authority as the uri and an empty path-query.
+     *
+     * Our URI transform always ends with a '/' which is technically not an empty path. To address this,
+     * the easiest thing to do is just detect if this was originally a star-pathed options request
+     * and drop the final '/' from the path.
+     */
+    if (s_is_star_path_options_method(request)) {
+        if (full_target_uri.len > 0 && *(full_target_uri.ptr + full_target_uri.len - 1) == '/') {
+            full_target_uri.len -= 1;
+        }
+    }
+
+    /* mutate the request with the new path value */
+    if (aws_http_message_set_request_path(request, full_target_uri)) {
+        goto done;
+    }
+
+    result = AWS_OP_SUCCESS;
+
+done:
+
+    aws_uri_clean_up(&target_uri);
+    aws_uri_clean_up(&uri);
+
+    return result;
+}
+
+/*
+ * Plaintext proxy request transformation function
+ *
+ * Rewrites the target uri to absolute form and injects any desired headers
+ */
+static int s_proxy_http_request_transform(struct aws_http_message *request, void *user_data) {
+    struct aws_http_proxy_user_data *proxy_ud = user_data;
+
+    if (aws_http_rewrite_uri_for_proxy_request(request, proxy_ud)) {
+        return AWS_OP_ERR;
+    }
+
+    /* Let the negotiator's forwarding transform mutate the request in place */
+    if ((*proxy_ud->proxy_negotiator->strategy_vtable.forwarding_vtable->forward_request_transform)(
+            proxy_ud->proxy_negotiator, request)) {
+        return AWS_OP_ERR;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/*
+ * Top-level function to route a connection request through a proxy server, with no channel security
+ */
+static int s_aws_http_client_connect_via_forwarding_proxy(const struct aws_http_client_connection_options *options) {
+    AWS_FATAL_ASSERT(options->tls_options == NULL);
+
+    /* Bug fix: validate proxy_options before the log below dereferences
+     * options->proxy_options->host; previously the assert ran only after the
+     * dereference, defeating its purpose. */
+    AWS_FATAL_ASSERT(options->proxy_options != NULL);
+
+    AWS_LOGF_INFO(
+        AWS_LS_HTTP_CONNECTION,
+        "(STATIC) Connecting to \"" PRInSTR "\" via proxy \"" PRInSTR "\"",
+        AWS_BYTE_CURSOR_PRI(options->host_name),
+        AWS_BYTE_CURSOR_PRI(options->proxy_options->host));
+
+    /* Create a wrapper user data that contains all proxy-related information, state, and user-facing callbacks */
+    struct aws_http_proxy_user_data *proxy_user_data =
+        aws_http_proxy_user_data_new(options->allocator, options, NULL, NULL);
+    if (proxy_user_data == NULL) {
+        return AWS_OP_ERR;
+    }
+
+    /* Fill in a new connection options pointing at the proxy */
+    struct aws_http_client_connection_options options_copy = *options;
+
+    options_copy.proxy_options = NULL;
+    options_copy.host_name = options->proxy_options->host;
+    options_copy.port = options->proxy_options->port;
+    options_copy.user_data = proxy_user_data;
+    options_copy.on_setup = s_aws_http_on_client_connection_http_forwarding_proxy_setup_fn;
+    options_copy.on_shutdown = s_aws_http_on_client_connection_http_proxy_shutdown_fn;
+    options_copy.tls_options = options->proxy_options->tls_options;
+    options_copy.requested_event_loop = options->requested_event_loop;
+    options_copy.prior_knowledge_http2 = false; /* ToDo, expose the protocol specific config for proxy connection. */
+
+    int result = aws_http_client_connect_internal(&options_copy, s_proxy_http_request_transform);
+    if (result == AWS_OP_ERR) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "(STATIC) Proxy http connection failed client connect with error %d(%s)",
+            aws_last_error(),
+            aws_error_str(aws_last_error()));
+
+        aws_http_proxy_user_data_destroy(proxy_user_data);
+    }
+
+    return result;
+}
+
+/*
+ * Starts an http connection to the proxy itself using the cached proxy
+ * config inside user_data. On failure, user_data is destroyed here; on
+ * success, the setup/shutdown callbacks take over its lifetime.
+ */
+static int s_create_tunneling_connection(struct aws_http_proxy_user_data *user_data) {
+    struct aws_http_client_connection_options connect_options;
+    AWS_ZERO_STRUCT(connect_options);
+
+    connect_options.self_size = sizeof(struct aws_http_client_connection_options);
+    connect_options.allocator = user_data->allocator;
+    connect_options.bootstrap = user_data->original_bootstrap;
+    connect_options.host_name = aws_byte_cursor_from_buf(&user_data->proxy_config->host);
+    connect_options.port = user_data->proxy_config->port;
+    connect_options.socket_options = &user_data->original_socket_options;
+    connect_options.tls_options = user_data->proxy_config->tls_options;
+    connect_options.monitoring_options = NULL; /* ToDo */
+    connect_options.manual_window_management = user_data->original_manual_window_management;
+    connect_options.initial_window_size = user_data->original_initial_window_size;
+    connect_options.user_data = user_data;
+    connect_options.on_setup = s_aws_http_on_client_connection_http_tunneling_proxy_setup_fn;
+    connect_options.on_shutdown = s_aws_http_on_client_connection_http_proxy_shutdown_fn;
+    connect_options.http1_options = NULL; /* ToDo, expose the protocol specific config for proxy connection. */
+    connect_options.http2_options = NULL; /* ToDo */
+    connect_options.requested_event_loop = user_data->requested_event_loop;
+
+    int result = aws_http_client_connect(&connect_options);
+    if (result == AWS_OP_ERR) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "(STATIC) Proxy tunnel connection failed client connect with error %d(%s)",
+            aws_last_error(),
+            aws_error_str(aws_last_error()));
+        aws_http_proxy_user_data_destroy(user_data);
+    }
+
+    return result;
+}
+
+/*
+ * Top-level function to route a connection through a proxy server via a CONNECT request
+ */
+static int s_aws_http_client_connect_via_tunneling_proxy(
+    const struct aws_http_client_connection_options *options,
+    aws_client_bootstrap_on_channel_event_fn *on_channel_setup,
+    aws_client_bootstrap_on_channel_event_fn *on_channel_shutdown) {
+    AWS_FATAL_ASSERT(options->proxy_options != NULL);
+
+    AWS_LOGF_INFO(
+        AWS_LS_HTTP_CONNECTION,
+        "(STATIC) Connecting to \"" PRInSTR "\" through a tunnel via proxy \"" PRInSTR "\"",
+        AWS_BYTE_CURSOR_PRI(options->host_name),
+        AWS_BYTE_CURSOR_PRI(options->proxy_options->host));
+
+    /* Create a wrapper user data that contains all proxy-related information, state, and user-facing callbacks */
+    struct aws_http_proxy_user_data *user_data =
+        aws_http_proxy_user_data_new(options->allocator, options, on_channel_setup, on_channel_shutdown);
+    if (user_data == NULL) {
+        return AWS_OP_ERR;
+    }
+
+    /* Ownership of user_data passes here: s_create_tunneling_connection
+     * destroys it on failure */
+    return s_create_tunneling_connection(user_data);
+}
+
+/*
+ * Resolves the LEGACY proxy connection type to TUNNEL (main connection uses
+ * TLS) or FORWARD (clear text); all other types pass through unchanged.
+ */
+static enum aws_http_proxy_connection_type s_determine_proxy_connection_type(
+    enum aws_http_proxy_connection_type proxy_connection_type,
+    bool is_tls_connection) {
+    if (proxy_connection_type != AWS_HPCT_HTTP_LEGACY) {
+        return proxy_connection_type;
+    }
+
+    if (is_tls_connection) {
+        return AWS_HPCT_HTTP_TUNNEL;
+    } else {
+        return AWS_HPCT_HTTP_FORWARD;
+    }
+}
+
+/*
+ * Looks up a proxy URI in the environment (https_proxy/HTTPS_PROXY when the
+ * connection uses TLS, otherwise http_proxy/HTTP_PROXY; lowercase is checked
+ * first) and parses it into *proxy_uri. *found reports whether a variable was
+ * present; AWS_OP_ERR is returned only when a variable was set but unparsable.
+ */
+static int s_proxy_uri_init_from_env_variable(
+    struct aws_allocator *allocator,
+    const struct aws_http_client_connection_options *options,
+    struct aws_uri *proxy_uri,
+    bool *found) {
+    struct aws_string *proxy_uri_string = NULL;
+    *found = false;
+    if (options->tls_options) {
+        if (aws_get_environment_value(allocator, s_https_proxy_env_var_low, &proxy_uri_string) == AWS_OP_SUCCESS &&
+            proxy_uri_string != NULL) {
+            AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION, "https_proxy environment found");
+        } else if (
+            aws_get_environment_value(allocator, s_https_proxy_env_var, &proxy_uri_string) == AWS_OP_SUCCESS &&
+            proxy_uri_string != NULL) {
+            AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION, "HTTPS_PROXY environment found");
+        } else {
+            /* No variable set: succeed with *found == false */
+            return AWS_OP_SUCCESS;
+        }
+    } else {
+        if (aws_get_environment_value(allocator, s_http_proxy_env_var_low, &proxy_uri_string) == AWS_OP_SUCCESS &&
+            proxy_uri_string != NULL) {
+            AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION, "http_proxy environment found");
+        } else if (
+            aws_get_environment_value(allocator, s_http_proxy_env_var, &proxy_uri_string) == AWS_OP_SUCCESS &&
+            proxy_uri_string != NULL) {
+            AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION, "HTTP_PROXY environment found");
+        } else {
+            /* No variable set: succeed with *found == false */
+            return AWS_OP_SUCCESS;
+        }
+    }
+    struct aws_byte_cursor proxy_uri_cursor = aws_byte_cursor_from_string(proxy_uri_string);
+    if (aws_uri_init_parse(proxy_uri, allocator, &proxy_uri_cursor)) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "Could not parse found proxy URI.");
+        aws_string_destroy(proxy_uri_string);
+        return AWS_OP_ERR;
+    }
+    *found = true;
+    aws_string_destroy(proxy_uri_string);
+    return AWS_OP_SUCCESS;
+}
+
+/*
+ * Validates the proxy configuration and dispatches to the forwarding or
+ * tunneling connect path based on the (legacy-resolved) connection type.
+ */
+static int s_connect_proxy(const struct aws_http_client_connection_options *options) {
+    if (aws_http_options_validate_proxy_configuration(options)) {
+        return AWS_OP_ERR;
+    }
+
+    enum aws_http_proxy_connection_type proxy_connection_type =
+        s_determine_proxy_connection_type(options->proxy_options->connection_type, options->tls_options != NULL);
+
+    switch (proxy_connection_type) {
+        case AWS_HPCT_HTTP_FORWARD:
+            return s_aws_http_client_connect_via_forwarding_proxy(options);
+
+        case AWS_HPCT_HTTP_TUNNEL:
+            return s_aws_http_client_connect_via_tunneling_proxy(options, NULL, NULL);
+
+        default:
+            return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+    }
+}
+
+/*
+ * Resolves TLS options for an environment-configured proxy: prefer the
+ * caller-supplied proxy_ev_settings->tls_options; otherwise build a default
+ * client TLS context (unless BYO_CRYPTO, where that is impossible). On the
+ * default path, default_tls_connection_options is initialized and must be
+ * cleaned up by the caller regardless of the return value.
+ */
+static int s_setup_proxy_tls_env_variable(
+    const struct aws_http_client_connection_options *options,
+    struct aws_tls_connection_options *default_tls_connection_options,
+    struct aws_http_proxy_options *proxy_options,
+    struct aws_uri *proxy_uri) {
+    (void)default_tls_connection_options;
+    (void)proxy_uri;
+    if (options->proxy_ev_settings->tls_options) {
+        proxy_options->tls_options = options->proxy_ev_settings->tls_options;
+    } else {
+#ifdef BYO_CRYPTO
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "Failed making default TLS context because of BYO_CRYPTO, set up the tls_options for proxy_env_settings to "
+            "make it work.");
+        return AWS_OP_ERR;
+#else
+        struct aws_tls_ctx *tls_ctx = NULL;
+        struct aws_tls_ctx_options tls_ctx_options;
+        AWS_ZERO_STRUCT(tls_ctx_options);
+        /* create a default tls options */
+        aws_tls_ctx_options_init_default_client(&tls_ctx_options, options->allocator);
+        struct aws_string *proxy_no_verify_peer_string = NULL;
+        if (aws_get_environment_value(
+                options->allocator, s_proxy_no_verify_peer_env_var, &proxy_no_verify_peer_string) == AWS_OP_SUCCESS &&
+            proxy_no_verify_peer_string != NULL) {
+            /* turn off the peer verification, if set up from environment variable. Mostly for testing. */
+            aws_tls_ctx_options_set_verify_peer(&tls_ctx_options, false);
+            aws_string_destroy(proxy_no_verify_peer_string);
+        }
+        tls_ctx = aws_tls_client_ctx_new(options->allocator, &tls_ctx_options);
+        aws_tls_ctx_options_clean_up(&tls_ctx_options);
+        if (!tls_ctx) {
+            AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "Failed to create default TLS context.");
+            return AWS_OP_ERR;
+        }
+        aws_tls_connection_options_init_from_ctx(default_tls_connection_options, tls_ctx);
+        /* tls options hold a ref to the ctx */
+        aws_tls_ctx_release(tls_ctx);
+        if (aws_tls_connection_options_set_server_name(
+                default_tls_connection_options, options->allocator, &proxy_uri->host_name)) {
+            AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "Failed set server name for TLS connection options.");
+            return AWS_OP_ERR;
+        }
+        proxy_options->tls_options = default_tls_connection_options;
+#endif
+    }
+    return AWS_OP_SUCCESS;
+}
+
+/*
+ * Attempts a proxy connection configured from environment variables; when no
+ * proxy variable is set, falls back to a direct (non-proxied) connection.
+ */
+static int s_connect_proxy_via_env_variable(const struct aws_http_client_connection_options *options) {
+    struct aws_http_proxy_options proxy_options;
+    AWS_ZERO_STRUCT(proxy_options);
+    struct aws_uri proxy_uri;
+    AWS_ZERO_STRUCT(proxy_uri);
+    struct aws_tls_connection_options default_tls_connection_options;
+    AWS_ZERO_STRUCT(default_tls_connection_options);
+    bool found = false;
+    bool success = false;
+    if (s_proxy_uri_init_from_env_variable(options->allocator, options, &proxy_uri, &found)) {
+        /* Environment variable is set, but we failed to parse it */
+        goto done;
+    }
+    if (found) {
+        proxy_options.host = proxy_uri.host_name;
+        proxy_options.port = proxy_uri.port;
+        proxy_options.connection_type = options->proxy_ev_settings->connection_type;
+        if (proxy_options.connection_type == AWS_HPCT_HTTP_LEGACY) {
+            if (options->tls_options) {
+                /* Use tunneling when main connection use TLS. */
+                proxy_options.connection_type = AWS_HPCT_HTTP_TUNNEL;
+            } else {
+                /* Use forwarding proxy when main connection use clear text. */
+                proxy_options.connection_type = AWS_HPCT_HTTP_FORWARD;
+            }
+        }
+        if (aws_byte_cursor_eq_ignore_case(&proxy_uri.scheme, &aws_http_scheme_https)) {
+            if (s_setup_proxy_tls_env_variable(options, &default_tls_connection_options, &proxy_options, &proxy_uri)) {
+                goto done;
+            }
+        }
+        /* Support basic authentication. */
+        if (proxy_uri.password.len) {
+            /* A non-empty password is set in the URI */
+            struct aws_http_proxy_strategy_basic_auth_options config = {
+                .proxy_connection_type = proxy_options.connection_type,
+                .user_name = proxy_uri.user,
+                .password = proxy_uri.password,
+            };
+            proxy_options.proxy_strategy = aws_http_proxy_strategy_new_basic_auth(options->allocator, &config);
+        }
+    } else {
+        success = true;
+        goto done;
+    }
+    struct aws_http_client_connection_options copied_options = *options;
+    copied_options.proxy_options = &proxy_options;
+    if (s_connect_proxy(&copied_options)) {
+        goto done;
+    }
+    success = true;
+done:
+    aws_tls_connection_options_clean_up(&default_tls_connection_options);
+    aws_http_proxy_strategy_release(proxy_options.proxy_strategy);
+    aws_uri_clean_up(&proxy_uri);
+    if (success && !found) {
+        /* Succeeded, but no environment variable was found. Connect without proxy */
+        return aws_http_client_connect_internal(options, NULL);
+    }
+    return success ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
+
+/*
+ * Dispatches a proxy-enabled connection request to the appropriate top-level connection function
+ */
+int aws_http_client_connect_via_proxy(const struct aws_http_client_connection_options *options) {
+    /* Explicit proxy options take precedence; the environment is consulted
+     * only when enabled and no proxy options were supplied. */
+    if (options->proxy_options == NULL && options->proxy_ev_settings &&
+        options->proxy_ev_settings->env_var_type == AWS_HPEV_ENABLE) {
+        return s_connect_proxy_via_env_variable(options);
+    }
+    return s_connect_proxy(options);
+}
+
+/*
+ * Shared constructor for proxy configs: deep-copies host and TLS options,
+ * then resolves a proxy strategy with precedence explicit strategy >
+ * basic-auth options > per-connection-type identity default.
+ */
+static struct aws_http_proxy_config *s_aws_http_proxy_config_new(
+    struct aws_allocator *allocator,
+    const struct aws_http_proxy_options *proxy_options,
+    enum aws_http_proxy_connection_type override_proxy_connection_type) {
+    AWS_FATAL_ASSERT(proxy_options != NULL);
+
+    struct aws_http_proxy_config *config = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_config));
+    if (config == NULL) {
+        return NULL;
+    }
+
+    config->allocator = allocator;
+    config->connection_type = override_proxy_connection_type;
+
+    if (aws_byte_buf_init_copy_from_cursor(&config->host, allocator, proxy_options->host)) {
+        goto on_error;
+    }
+
+    if (proxy_options->tls_options) {
+        config->tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options));
+        if (aws_tls_connection_options_copy(config->tls_options, proxy_options->tls_options)) {
+            goto on_error;
+        }
+    }
+
+    config->port = proxy_options->port;
+
+    if (proxy_options->proxy_strategy != NULL) {
+        config->proxy_strategy = aws_http_proxy_strategy_acquire(proxy_options->proxy_strategy);
+    } else if (proxy_options->auth_type == AWS_HPAT_BASIC) {
+        struct aws_http_proxy_strategy_basic_auth_options basic_config;
+        AWS_ZERO_STRUCT(basic_config);
+
+        basic_config.proxy_connection_type = override_proxy_connection_type;
+        basic_config.user_name = proxy_options->auth_username;
+        basic_config.password = proxy_options->auth_password;
+
+        config->proxy_strategy = aws_http_proxy_strategy_new_basic_auth(allocator, &basic_config);
+    }
+
+    /* Fall back to an identity strategy matching the connection type */
+    if (config->proxy_strategy == NULL) {
+        switch (override_proxy_connection_type) {
+            case AWS_HPCT_HTTP_FORWARD:
+                config->proxy_strategy = aws_http_proxy_strategy_new_forwarding_identity(allocator);
+                break;
+
+            case AWS_HPCT_HTTP_TUNNEL:
+                config->proxy_strategy = aws_http_proxy_strategy_new_tunneling_one_time_identity(allocator);
+                break;
+
+            default:
+                break;
+        }
+
+        if (config->proxy_strategy == NULL) {
+            goto on_error;
+        }
+    }
+
+    return config;
+
+on_error:
+
+    aws_http_proxy_config_destroy(config);
+
+    return NULL;
+}
+
+/*
+ * Builds a proxy config from client connection options, resolving the LEGACY
+ * connection type from whether the main connection uses TLS.
+ */
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_connection_options(
+    struct aws_allocator *allocator,
+    const struct aws_http_client_connection_options *options) {
+    AWS_FATAL_ASSERT(options != NULL);
+    AWS_FATAL_ASSERT(options->proxy_options != NULL);
+
+    return s_aws_http_proxy_config_new(
+        allocator,
+        options->proxy_options,
+        s_determine_proxy_connection_type(options->proxy_options->connection_type, options->tls_options != NULL));
+}
+
+/*
+ * Builds a proxy config from connection-manager options, resolving the LEGACY
+ * connection type from whether the managed connections use TLS.
+ */
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_manager_options(
+    struct aws_allocator *allocator,
+    const struct aws_http_connection_manager_options *options) {
+    AWS_FATAL_ASSERT(options != NULL);
+    AWS_FATAL_ASSERT(options->proxy_options != NULL);
+
+    return s_aws_http_proxy_config_new(
+        allocator,
+        options->proxy_options,
+        s_determine_proxy_connection_type(
+            options->proxy_options->connection_type, options->tls_connection_options != NULL));
+}
+
+/*
+ * Builds a proxy config that always tunnels, regardless of the connection
+ * type in proxy_options.
+ */
+struct aws_http_proxy_config *aws_http_proxy_config_new_tunneling_from_proxy_options(
+    struct aws_allocator *allocator,
+    const struct aws_http_proxy_options *proxy_options) {
+
+    return s_aws_http_proxy_config_new(allocator, proxy_options, AWS_HPCT_HTTP_TUNNEL);
+}
+
+/*
+ * Builds a proxy config straight from proxy options; LEGACY is rejected here
+ * because there is no TLS context available to resolve it.
+ */
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_proxy_options(
+    struct aws_allocator *allocator,
+    const struct aws_http_proxy_options *proxy_options) {
+    if (proxy_options->connection_type == AWS_HPCT_HTTP_LEGACY) {
+        AWS_LOGF_ERROR(AWS_LS_HTTP_PROXY_NEGOTIATION, "LEGACY type is not supported to create proxy config");
+        return NULL;
+    }
+
+    return s_aws_http_proxy_config_new(allocator, proxy_options, proxy_options->connection_type);
+}
+
+/*
+ * Builds a proxy config from proxy options, resolving LEGACY with an
+ * explicitly supplied is_tls_connection flag.
+ */
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_proxy_options_with_tls_info(
+    struct aws_allocator *allocator,
+    const struct aws_http_proxy_options *proxy_options,
+    bool is_tls_connection) {
+    AWS_FATAL_ASSERT(proxy_options != NULL);
+
+    return s_aws_http_proxy_config_new(
+        allocator, proxy_options, s_determine_proxy_connection_type(proxy_options->connection_type, is_tls_connection));
+}
+
+/*
+ * Deep-clones a proxy config: copies host and TLS options, acquires a
+ * reference on the strategy. Returns NULL on failure.
+ */
+struct aws_http_proxy_config *aws_http_proxy_config_new_clone(
+    struct aws_allocator *allocator,
+    const struct aws_http_proxy_config *proxy_config) {
+
+    AWS_FATAL_ASSERT(proxy_config != NULL);
+
+    struct aws_http_proxy_config *config = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_config));
+    if (config == NULL) {
+        return NULL;
+    }
+
+    /* Bug fix: set the allocator before any fallible step; the on_error path
+     * calls aws_http_proxy_config_destroy(), which releases memory through
+     * config->allocator (previously still NULL if the byte-buf copy failed).
+     * This also matches the field order in s_aws_http_proxy_config_new(). */
+    config->allocator = allocator;
+    config->connection_type = proxy_config->connection_type;
+
+    if (aws_byte_buf_init_copy_from_cursor(&config->host, allocator, aws_byte_cursor_from_buf(&proxy_config->host))) {
+        goto on_error;
+    }
+
+    if (proxy_config->tls_options) {
+        config->tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options));
+        if (aws_tls_connection_options_copy(config->tls_options, proxy_config->tls_options)) {
+            goto on_error;
+        }
+    }
+
+    config->port = proxy_config->port;
+    config->proxy_strategy = aws_http_proxy_strategy_acquire(proxy_config->proxy_strategy);
+
+    return config;
+
+on_error:
+
+    aws_http_proxy_config_destroy(config);
+
+    return NULL;
+}
+
+/*
+ * Frees a proxy config and everything it owns (host buffer, copied TLS
+ * options, strategy reference). NULL-safe.
+ */
+void aws_http_proxy_config_destroy(struct aws_http_proxy_config *config) {
+    if (config == NULL) {
+        return;
+    }
+
+    aws_byte_buf_clean_up(&config->host);
+
+    if (config->tls_options) {
+        aws_tls_connection_options_clean_up(config->tls_options);
+        aws_mem_release(config->allocator, config->tls_options);
+    }
+
+    aws_http_proxy_strategy_release(config->proxy_strategy);
+
+    aws_mem_release(config->allocator, config);
+}
+
+/*
+ * Initializes proxy options as a shallow view of a config: the tls_options
+ * and proxy_strategy pointers are borrowed (no references acquired), so the
+ * config must outlive the options.
+ */
+void aws_http_proxy_options_init_from_config(
+    struct aws_http_proxy_options *options,
+    const struct aws_http_proxy_config *config) {
+    AWS_FATAL_ASSERT(options && config);
+
+    options->connection_type = config->connection_type;
+    options->host = aws_byte_cursor_from_buf(&config->host);
+    options->port = config->port;
+    options->tls_options = config->tls_options;
+    options->proxy_strategy = config->proxy_strategy;
+}
+
+/*
+ * Sanity-checks proxy-related connection options: a forwarding proxy cannot
+ * be combined with main-connection TLS, and an explicit strategy must match
+ * the requested proxy connection type.
+ */
+int aws_http_options_validate_proxy_configuration(const struct aws_http_client_connection_options *options) {
+    if (options == NULL || options->proxy_options == NULL) {
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    enum aws_http_proxy_connection_type proxy_type = options->proxy_options->connection_type;
+    if (proxy_type == AWS_HPCT_HTTP_FORWARD && options->tls_options != NULL) {
+        return aws_raise_error(AWS_ERROR_INVALID_STATE);
+    }
+
+    struct aws_http_proxy_strategy *proxy_strategy = options->proxy_options->proxy_strategy;
+    if (proxy_strategy != NULL) {
+        if (proxy_strategy->proxy_connection_type != proxy_type) {
+            return aws_raise_error(AWS_ERROR_INVALID_STATE);
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Wrapper state for establishing a raw (non-http) channel through an http proxy */
+struct aws_proxied_socket_channel_user_data {
+    struct aws_allocator *allocator;
+    struct aws_client_bootstrap *bootstrap; /* reference held; released on destroy */
+    struct aws_channel *channel; /* set once channel setup succeeds */
+    aws_client_bootstrap_on_channel_event_fn *original_setup_callback;
+    aws_client_bootstrap_on_channel_event_fn *original_shutdown_callback;
+    void *original_user_data;
+};
+
+/* Releases the bootstrap reference and frees the wrapper. NULL-safe. */
+static void s_proxied_socket_channel_user_data_destroy(struct aws_proxied_socket_channel_user_data *user_data) {
+    if (user_data == NULL) {
+        return;
+    }
+
+    aws_client_bootstrap_release(user_data->bootstrap);
+
+    aws_mem_release(user_data->allocator, user_data);
+}
+
+/*
+ * Allocates wrapper user data, capturing the caller's original channel
+ * callbacks/user data and acquiring a reference on the bootstrap.
+ */
+static struct aws_proxied_socket_channel_user_data *s_proxied_socket_channel_user_data_new(
+    struct aws_allocator *allocator,
+    struct aws_socket_channel_bootstrap_options *channel_options) {
+    struct aws_proxied_socket_channel_user_data *user_data =
+        aws_mem_calloc(allocator, 1, sizeof(struct aws_proxied_socket_channel_user_data));
+    if (user_data == NULL) {
+        return NULL;
+    }
+
+    user_data->allocator = allocator;
+    user_data->original_setup_callback = channel_options->setup_callback;
+    user_data->original_shutdown_callback = channel_options->shutdown_callback;
+    user_data->original_user_data = channel_options->user_data;
+    user_data->bootstrap = aws_client_bootstrap_acquire(channel_options->bootstrap);
+
+    return user_data;
+}
+
+/*
+ * Channel setup shim: forwards setup to the caller's original callback with
+ * the original user data. On failure the wrapper is destroyed here (the
+ * shutdown shim will not run for a channel that never came up).
+ */
+static void s_http_proxied_socket_channel_setup(
+    struct aws_client_bootstrap *bootstrap,
+    int error_code,
+    struct aws_channel *channel,
+    void *user_data) {
+
+    (void)bootstrap;
+    struct aws_proxied_socket_channel_user_data *proxied_user_data = user_data;
+
+    if (error_code != AWS_ERROR_SUCCESS || channel == NULL) {
+        proxied_user_data->original_setup_callback(
+            proxied_user_data->bootstrap, error_code, NULL, proxied_user_data->original_user_data);
+        s_proxied_socket_channel_user_data_destroy(proxied_user_data);
+        return;
+    }
+
+    proxied_user_data->channel = channel;
+
+    proxied_user_data->original_setup_callback(
+        proxied_user_data->bootstrap,
+        AWS_ERROR_SUCCESS,
+        proxied_user_data->channel,
+        proxied_user_data->original_user_data);
+}
+
+/*
+ * Channel shutdown shim: forwards shutdown to the caller's original callback,
+ * then destroys the wrapper (end of the wrapper's lifetime).
+ */
+static void s_http_proxied_socket_channel_shutdown(
+    struct aws_client_bootstrap *bootstrap,
+    int error_code,
+    struct aws_channel *channel,
+    void *user_data) {
+    (void)bootstrap;
+    (void)channel;
+    struct aws_proxied_socket_channel_user_data *proxied_user_data = user_data;
+    proxied_user_data->original_shutdown_callback(
+        proxied_user_data->bootstrap, error_code, proxied_user_data->channel, proxied_user_data->original_user_data);
+
+    s_proxied_socket_channel_user_data_destroy(proxied_user_data);
+}
+
+/*
+ * Establishes a raw socket channel tunneled through an http proxy via a
+ * CONNECT request. Requires a tunneling proxy configuration and TLS to the
+ * final endpoint; channel-level callbacks are used instead of http callbacks.
+ */
+int aws_http_proxy_new_socket_channel(
+    struct aws_socket_channel_bootstrap_options *channel_options,
+    const struct aws_http_proxy_options *proxy_options) {
+
+    AWS_FATAL_ASSERT(channel_options != NULL && channel_options->bootstrap != NULL);
+    AWS_FATAL_ASSERT(proxy_options != NULL);
+
+    if (proxy_options->connection_type != AWS_HPCT_HTTP_TUNNEL) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_PROXY_NEGOTIATION,
+            "Creating a raw protocol channel through an http proxy requires a tunneling proxy "
+            "configuration");
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    if (channel_options->tls_options == NULL) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_PROXY_NEGOTIATION,
+            "Creating a raw protocol channel through an http proxy requires tls to the endpoint");
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    struct aws_allocator *allocator = channel_options->bootstrap->allocator;
+    /* NOTE(review): user_data is not NULL-checked before use below; presumably
+     * aws_mem_calloc aborts on OOM in this codebase, making NULL impossible —
+     * confirm against aws-c-common's allocator contract. */
+    struct aws_proxied_socket_channel_user_data *user_data =
+        s_proxied_socket_channel_user_data_new(allocator, channel_options);
+
+    struct aws_http_client_connection_options http_connection_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT;
+    http_connection_options.allocator = allocator;
+    http_connection_options.bootstrap = channel_options->bootstrap;
+    http_connection_options.host_name = aws_byte_cursor_from_c_str(channel_options->host_name);
+    http_connection_options.port = channel_options->port;
+    http_connection_options.socket_options = channel_options->socket_options;
+    http_connection_options.tls_options = channel_options->tls_options;
+    http_connection_options.proxy_options = proxy_options;
+    http_connection_options.user_data = user_data;
+    http_connection_options.on_setup = NULL;    /* use channel callbacks, not http callbacks */
+    http_connection_options.on_shutdown = NULL; /* use channel callbacks, not http callbacks */
+    http_connection_options.requested_event_loop = channel_options->requested_event_loop;
+
+    if (s_aws_http_client_connect_via_tunneling_proxy(
+            &http_connection_options, s_http_proxied_socket_channel_setup, s_http_proxied_socket_channel_shutdown)) {
+        goto on_error;
+    }
+
+    return AWS_OP_SUCCESS;
+
+on_error:
+
+    s_proxied_socket_channel_user_data_destroy(user_data);
+
+    return AWS_OP_ERR;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c b/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
new file mode 100644
index 00000000000..3130d91cc3f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
@@ -0,0 +1,1703 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/proxy.h>
+
+#include <aws/common/encoding.h>
+#include <aws/common/string.h>
+#include <aws/http/private/proxy_impl.h>
+
+#if defined(_MSC_VER)
+# pragma warning(push)
+# pragma warning(disable : 4221)
+#endif /* _MSC_VER */
+
+struct aws_http_proxy_negotiator *aws_http_proxy_negotiator_acquire(
+ struct aws_http_proxy_negotiator *proxy_negotiator) {
+ if (proxy_negotiator != NULL) {
+ aws_ref_count_acquire(&proxy_negotiator->ref_count);
+ }
+
+ return proxy_negotiator;
+}
+
+void aws_http_proxy_negotiator_release(struct aws_http_proxy_negotiator *proxy_negotiator) {
+ if (proxy_negotiator != NULL) {
+ aws_ref_count_release(&proxy_negotiator->ref_count);
+ }
+}
+
+struct aws_http_proxy_negotiator *aws_http_proxy_strategy_create_negotiator(
+ struct aws_http_proxy_strategy *strategy,
+ struct aws_allocator *allocator) {
+ if (strategy == NULL || allocator == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ return strategy->vtable->create_negotiator(strategy, allocator);
+}
+
/*
 * Asks the negotiator whether another CONNECT attempt should be made after a
 * failed one.  get_retry_directive is an optional vtable entry; when absent
 * (or the negotiator is NULL) the answer is "stop".
 *
 * NOTE(review): this reads strategy_vtable.tunnelling_vtable unconditionally.
 * Presumably retry directives only apply to tunnelling negotiators — confirm
 * that forwarding negotiators never reach this path, since strategy_vtable
 * appears to hold either a forwarding or a tunnelling pointer.
 */
enum aws_http_proxy_negotiation_retry_directive aws_http_proxy_negotiator_get_retry_directive(
    struct aws_http_proxy_negotiator *proxy_negotiator) {
    if (proxy_negotiator != NULL) {
        if (proxy_negotiator->strategy_vtable.tunnelling_vtable->get_retry_directive != NULL) {
            return proxy_negotiator->strategy_vtable.tunnelling_vtable->get_retry_directive(proxy_negotiator);
        }
    }

    return AWS_HPNRD_STOP;
}
+
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_acquire(struct aws_http_proxy_strategy *proxy_strategy) {
+ if (proxy_strategy != NULL) {
+ aws_ref_count_acquire(&proxy_strategy->ref_count);
+ }
+
+ return proxy_strategy;
+}
+
+void aws_http_proxy_strategy_release(struct aws_http_proxy_strategy *proxy_strategy) {
+ if (proxy_strategy != NULL) {
+ aws_ref_count_release(&proxy_strategy->ref_count);
+ }
+}
+
+/*****************************************************************************************************************/
+
/*
 * Tracks where a negotiator is in its single CONNECT/request attempt; the
 * transforms below use it to reject re-entry and record the outcome.
 */
enum proxy_negotiator_connect_state {
    AWS_PNCS_READY,       /* no attempt made yet */
    AWS_PNCS_IN_PROGRESS, /* request transform applied, awaiting proxy response */
    AWS_PNCS_SUCCESS,     /* proxy answered 200 OK */
    AWS_PNCS_FAILURE,     /* proxy answered non-200 */
};
+
+/* Functions for basic auth strategy */
+
/* Strategy configuration for basic ("user:password") proxy authentication, RFC 7617. */
struct aws_http_proxy_strategy_basic_auth {
    struct aws_allocator *allocator;
    /* owned copies of the configured credentials; freed in s_destroy_basic_auth_strategy */
    struct aws_string *user_name;
    struct aws_string *password;
    struct aws_http_proxy_strategy strategy_base;
};
+
+static void s_destroy_basic_auth_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+ struct aws_http_proxy_strategy_basic_auth *basic_auth_strategy = proxy_strategy->impl;
+
+ aws_string_destroy(basic_auth_strategy->user_name);
+ aws_string_destroy(basic_auth_strategy->password);
+
+ aws_mem_release(basic_auth_strategy->allocator, basic_auth_strategy);
+}
+
/* Per-connection negotiator state for the basic-auth strategy. */
struct aws_http_proxy_negotiator_basic_auth {
    struct aws_allocator *allocator;

    /* keeps the owning strategy (and so the credentials) alive for the negotiator's lifetime */
    struct aws_http_proxy_strategy *strategy;

    /* basic auth is single-shot; this guards against a second attempt */
    enum proxy_negotiator_connect_state connect_state;

    struct aws_http_proxy_negotiator negotiator_base;
};
+
+static void s_destroy_basic_auth_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+ struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator = proxy_negotiator->impl;
+
+ aws_http_proxy_strategy_release(basic_auth_negotiator->strategy);
+
+ aws_mem_release(basic_auth_negotiator->allocator, basic_auth_negotiator);
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_authorization_header_name, "Proxy-Authorization");
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_authorization_header_basic_prefix, "Basic ");
+
/*
 * Adds a proxy authentication header based on the basic authentication mode, rfc7617:
 * "Proxy-Authorization: Basic base64(user:password)".
 * Returns AWS_OP_SUCCESS/AWS_OP_ERR; on failure the request is left without the header
 * (or with it partially unset) and the last error is raised by the failing call.
 */
static int s_add_basic_proxy_authentication_header(
    struct aws_allocator *allocator,
    struct aws_http_message *request,
    struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator) {

    /* scratch buffer holding "user:password" before base64 encoding */
    struct aws_byte_buf base64_input_value;
    AWS_ZERO_STRUCT(base64_input_value);

    /* final header value: "Basic " followed by the base64 text */
    struct aws_byte_buf header_value;
    AWS_ZERO_STRUCT(header_value);

    int result = AWS_OP_ERR;

    struct aws_http_proxy_strategy_basic_auth *basic_auth_strategy = basic_auth_negotiator->strategy->impl;

    /* exact capacity: user + password + 1 for the ':' separator */
    if (aws_byte_buf_init(
            &base64_input_value,
            allocator,
            basic_auth_strategy->user_name->len + basic_auth_strategy->password->len + 1)) {
        goto done;
    }

    /* First build a buffer with "username:password" in it */
    struct aws_byte_cursor username_cursor = aws_byte_cursor_from_string(basic_auth_strategy->user_name);
    if (aws_byte_buf_append(&base64_input_value, &username_cursor)) {
        goto done;
    }

    struct aws_byte_cursor colon_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":");
    if (aws_byte_buf_append(&base64_input_value, &colon_cursor)) {
        goto done;
    }

    struct aws_byte_cursor password_cursor = aws_byte_cursor_from_string(basic_auth_strategy->password);
    if (aws_byte_buf_append(&base64_input_value, &password_cursor)) {
        goto done;
    }

    struct aws_byte_cursor base64_source_cursor =
        aws_byte_cursor_from_array(base64_input_value.buffer, base64_input_value.len);

    /* Figure out how much room we need in our final header value buffer */
    size_t required_size = 0;
    if (aws_base64_compute_encoded_len(base64_source_cursor.len, &required_size)) {
        goto done;
    }

    required_size += s_proxy_authorization_header_basic_prefix->len + 1;
    if (aws_byte_buf_init(&header_value, allocator, required_size)) {
        goto done;
    }

    /* Build the final header value by appending the authorization type and the base64 encoding string together */
    struct aws_byte_cursor basic_prefix = aws_byte_cursor_from_string(s_proxy_authorization_header_basic_prefix);
    if (aws_byte_buf_append_dynamic(&header_value, &basic_prefix)) {
        goto done;
    }

    if (aws_base64_encode(&base64_source_cursor, &header_value)) {
        goto done;
    }

    /* the cursor aliases header_value; aws_http_message_add_header copies it into the message */
    struct aws_http_header header = {
        .name = aws_byte_cursor_from_string(s_proxy_authorization_header_name),
        .value = aws_byte_cursor_from_array(header_value.buffer, header_value.len),
    };

    if (aws_http_message_add_header(request, header)) {
        goto done;
    }

    result = AWS_OP_SUCCESS;

done:

    aws_byte_buf_clean_up(&header_value);
    aws_byte_buf_clean_up(&base64_input_value);

    return result;
}
+
+int s_basic_auth_forward_add_header(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message) {
+ struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator = proxy_negotiator->impl;
+
+ return s_add_basic_proxy_authentication_header(basic_auth_negotiator->allocator, message, basic_auth_negotiator);
+}
+
+void s_basic_auth_tunnel_add_header(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message,
+ aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+ aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+ void *internal_proxy_user_data) {
+
+ struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator = proxy_negotiator->impl;
+ if (basic_auth_negotiator->connect_state != AWS_PNCS_READY) {
+ negotiation_termination_callback(message, AWS_ERROR_HTTP_PROXY_CONNECT_FAILED, internal_proxy_user_data);
+ return;
+ }
+
+ basic_auth_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+
+ if (s_add_basic_proxy_authentication_header(basic_auth_negotiator->allocator, message, basic_auth_negotiator)) {
+ negotiation_termination_callback(message, aws_last_error(), internal_proxy_user_data);
+ return;
+ }
+
+ negotiation_http_request_forward_callback(message, internal_proxy_user_data);
+}
+
+static int s_basic_auth_on_connect_status(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_status_code status_code) {
+ struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator = proxy_negotiator->impl;
+
+ if (basic_auth_negotiator->connect_state == AWS_PNCS_IN_PROGRESS) {
+ if (AWS_HTTP_STATUS_CODE_200_OK != status_code) {
+ basic_auth_negotiator->connect_state = AWS_PNCS_FAILURE;
+ } else {
+ basic_auth_negotiator->connect_state = AWS_PNCS_SUCCESS;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
/* Forwarding-proxy vtable for basic auth: only a per-request header transform is needed. */
static struct aws_http_proxy_negotiator_forwarding_vtable s_basic_auth_proxy_negotiator_forwarding_vtable = {
    .forward_request_transform = s_basic_auth_forward_add_header,
};
+
/* Tunnelling-proxy vtable for basic auth: transform the CONNECT request, then watch its status. */
static struct aws_http_proxy_negotiator_tunnelling_vtable s_basic_auth_proxy_negotiator_tunneling_vtable = {
    .on_status_callback = s_basic_auth_on_connect_status,
    .connect_request_transform = s_basic_auth_tunnel_add_header,
};
+
/*
 * Creates a basic-auth negotiator, selecting the forwarding or tunnelling
 * vtable from the strategy's configured proxy connection type.  Returns NULL
 * (with an error raised) on invalid arguments or allocation failure.
 */
static struct aws_http_proxy_negotiator *s_create_basic_auth_negotiator(
    struct aws_http_proxy_strategy *proxy_strategy,
    struct aws_allocator *allocator) {
    if (proxy_strategy == NULL || allocator == NULL) {
        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
        return NULL;
    }

    struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator =
        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_basic_auth));
    if (basic_auth_negotiator == NULL) {
        return NULL;
    }

    basic_auth_negotiator->allocator = allocator;
    basic_auth_negotiator->connect_state = AWS_PNCS_READY;
    basic_auth_negotiator->negotiator_base.impl = basic_auth_negotiator;
    /* ref count starts at 1; s_destroy_basic_auth_negotiator runs when it reaches 0 */
    aws_ref_count_init(
        &basic_auth_negotiator->negotiator_base.ref_count,
        &basic_auth_negotiator->negotiator_base,
        (aws_simple_completion_callback *)s_destroy_basic_auth_negotiator);

    if (proxy_strategy->proxy_connection_type == AWS_HPCT_HTTP_FORWARD) {
        basic_auth_negotiator->negotiator_base.strategy_vtable.forwarding_vtable =
            &s_basic_auth_proxy_negotiator_forwarding_vtable;
    } else {
        basic_auth_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable =
            &s_basic_auth_proxy_negotiator_tunneling_vtable;
    }

    /* the negotiator reads credentials from the strategy, so keep it alive */
    basic_auth_negotiator->strategy = aws_http_proxy_strategy_acquire(proxy_strategy);

    return &basic_auth_negotiator->negotiator_base;
}
+
/* A proxy strategy's only vtable entry is its negotiator factory. */
static struct aws_http_proxy_strategy_vtable s_basic_auth_proxy_strategy_vtable = {
    .create_negotiator = s_create_basic_auth_negotiator,
};
+
/*
 * Public factory for a basic-auth proxy strategy.  Copies the credentials out
 * of the config, so the caller's cursors need not outlive this call.  Valid
 * for both forwarding and tunnelling proxy connections.  Returns NULL with an
 * error raised on invalid arguments or allocation failure.
 */
struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_basic_auth(
    struct aws_allocator *allocator,
    struct aws_http_proxy_strategy_basic_auth_options *config) {
    if (config == NULL || allocator == NULL) {
        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
        return NULL;
    }

    if (config->proxy_connection_type != AWS_HPCT_HTTP_FORWARD &&
        config->proxy_connection_type != AWS_HPCT_HTTP_TUNNEL) {
        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
        return NULL;
    }

    struct aws_http_proxy_strategy_basic_auth *basic_auth_strategy =
        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_basic_auth));
    if (basic_auth_strategy == NULL) {
        return NULL;
    }

    basic_auth_strategy->strategy_base.impl = basic_auth_strategy;
    basic_auth_strategy->strategy_base.vtable = &s_basic_auth_proxy_strategy_vtable;
    basic_auth_strategy->allocator = allocator;
    basic_auth_strategy->strategy_base.proxy_connection_type = config->proxy_connection_type;
    /* init the ref count before the fallible steps so the error path can release uniformly */
    aws_ref_count_init(
        &basic_auth_strategy->strategy_base.ref_count,
        &basic_auth_strategy->strategy_base,
        (aws_simple_completion_callback *)s_destroy_basic_auth_strategy);

    basic_auth_strategy->user_name = aws_string_new_from_cursor(allocator, &config->user_name);
    if (basic_auth_strategy->user_name == NULL) {
        goto on_error;
    }

    basic_auth_strategy->password = aws_string_new_from_cursor(allocator, &config->password);
    if (basic_auth_strategy->password == NULL) {
        goto on_error;
    }

    return &basic_auth_strategy->strategy_base;

on_error:

    /* drops the initial reference; the destroy callback tolerates NULL strings */
    aws_http_proxy_strategy_release(&basic_auth_strategy->strategy_base);

    return NULL;
}
+
+/*****************************************************************************************************************/
+
/* Strategy that sends the CONNECT request as-is, exactly once (no authentication). */
struct aws_http_proxy_strategy_one_time_identity {
    struct aws_allocator *allocator;

    struct aws_http_proxy_strategy strategy_base;
};
+
/* Per-connection negotiator state for the one-time-identity strategy. */
struct aws_http_proxy_negotiator_one_time_identity {
    struct aws_allocator *allocator;

    /* enforces the "one time" part: only a READY negotiator may forward the CONNECT */
    enum proxy_negotiator_connect_state connect_state;

    struct aws_http_proxy_negotiator negotiator_base;
};
+
+static void s_destroy_one_time_identity_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+ struct aws_http_proxy_negotiator_one_time_identity *identity_negotiator = proxy_negotiator->impl;
+
+ aws_mem_release(identity_negotiator->allocator, identity_negotiator);
+}
+
+void s_one_time_identity_connect_transform(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message,
+ aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+ aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+ void *internal_proxy_user_data) {
+
+ struct aws_http_proxy_negotiator_one_time_identity *one_time_identity_negotiator = proxy_negotiator->impl;
+ if (one_time_identity_negotiator->connect_state != AWS_PNCS_READY) {
+ negotiation_termination_callback(message, AWS_ERROR_HTTP_PROXY_CONNECT_FAILED, internal_proxy_user_data);
+ return;
+ }
+
+ one_time_identity_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+ negotiation_http_request_forward_callback(message, internal_proxy_user_data);
+}
+
+static int s_one_time_identity_on_connect_status(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_status_code status_code) {
+ struct aws_http_proxy_negotiator_one_time_identity *one_time_identity_negotiator = proxy_negotiator->impl;
+
+ if (one_time_identity_negotiator->connect_state == AWS_PNCS_IN_PROGRESS) {
+ if (AWS_HTTP_STATUS_CODE_200_OK != status_code) {
+ one_time_identity_negotiator->connect_state = AWS_PNCS_FAILURE;
+ } else {
+ one_time_identity_negotiator->connect_state = AWS_PNCS_SUCCESS;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
/* Tunnelling vtable for one-time identity: pass the CONNECT through, then record its status. */
static struct aws_http_proxy_negotiator_tunnelling_vtable s_one_time_identity_proxy_negotiator_tunneling_vtable = {
    .on_status_callback = s_one_time_identity_on_connect_status,
    .connect_request_transform = s_one_time_identity_connect_transform,
};
+
/*
 * Creates a one-time-identity negotiator.  Unlike basic auth, this strategy is
 * tunnelling-only, so the tunnelling vtable is installed unconditionally.
 */
static struct aws_http_proxy_negotiator *s_create_one_time_identity_negotiator(
    struct aws_http_proxy_strategy *proxy_strategy,
    struct aws_allocator *allocator) {
    if (proxy_strategy == NULL || allocator == NULL) {
        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
        return NULL;
    }

    struct aws_http_proxy_negotiator_one_time_identity *identity_negotiator =
        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_one_time_identity));
    if (identity_negotiator == NULL) {
        return NULL;
    }

    identity_negotiator->allocator = allocator;
    identity_negotiator->connect_state = AWS_PNCS_READY;
    identity_negotiator->negotiator_base.impl = identity_negotiator;
    /* ref count starts at 1; destroyed via s_destroy_one_time_identity_negotiator */
    aws_ref_count_init(
        &identity_negotiator->negotiator_base.ref_count,
        &identity_negotiator->negotiator_base,
        (aws_simple_completion_callback *)s_destroy_one_time_identity_negotiator);

    identity_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable =
        &s_one_time_identity_proxy_negotiator_tunneling_vtable;

    return &identity_negotiator->negotiator_base;
}
+
/* Strategy vtable: negotiator factory only. */
static struct aws_http_proxy_strategy_vtable s_one_time_identity_proxy_strategy_vtable = {
    .create_negotiator = s_create_one_time_identity_negotiator,
};
+
+static void s_destroy_one_time_identity_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+ struct aws_http_proxy_strategy_one_time_identity *identity_strategy = proxy_strategy->impl;
+
+ aws_mem_release(identity_strategy->allocator, identity_strategy);
+}
+
/*
 * Public factory for a tunnelling proxy strategy that issues a single,
 * unauthenticated CONNECT request.  Returns NULL with an error raised on
 * invalid arguments or allocation failure.
 */
struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_one_time_identity(
    struct aws_allocator *allocator) {
    if (allocator == NULL) {
        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
        return NULL;
    }

    struct aws_http_proxy_strategy_one_time_identity *identity_strategy =
        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_one_time_identity));
    if (identity_strategy == NULL) {
        return NULL;
    }

    identity_strategy->strategy_base.impl = identity_strategy;
    identity_strategy->strategy_base.vtable = &s_one_time_identity_proxy_strategy_vtable;
    identity_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL;
    identity_strategy->allocator = allocator;

    /* caller owns the initial reference */
    aws_ref_count_init(
        &identity_strategy->strategy_base.ref_count,
        &identity_strategy->strategy_base,
        (aws_simple_completion_callback *)s_destroy_one_time_identity_strategy);

    return &identity_strategy->strategy_base;
}
+
+/******************************************************************************************************************/
+
/* Strategy for forwarding proxies that passes requests through without modification. */
struct aws_http_proxy_strategy_forwarding_identity {
    struct aws_allocator *allocator;

    struct aws_http_proxy_strategy strategy_base;
};
+
/* Per-connection negotiator for the forwarding-identity strategy; no state beyond ownership. */
struct aws_http_proxy_negotiator_forwarding_identity {
    struct aws_allocator *allocator;

    struct aws_http_proxy_negotiator negotiator_base;
};
+
+static void s_destroy_forwarding_identity_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+ struct aws_http_proxy_negotiator_forwarding_identity *identity_negotiator = proxy_negotiator->impl;
+
+ aws_mem_release(identity_negotiator->allocator, identity_negotiator);
+}
+
+int s_forwarding_identity_connect_transform(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message) {
+
+ (void)message;
+ (void)proxy_negotiator;
+
+ return AWS_OP_SUCCESS;
+}
+
/*
 * Forwarding vtable for the identity strategy.
 * NOTE(review): the "_tunneling_" in this variable's name is a misnomer — the
 * type and usage are forwarding; renaming would touch its user below, so it is
 * only flagged here.
 */
static struct aws_http_proxy_negotiator_forwarding_vtable s_forwarding_identity_proxy_negotiator_tunneling_vtable = {
    .forward_request_transform = s_forwarding_identity_connect_transform,
};
+
/*
 * Creates a forwarding-identity negotiator.  Forwarding negotiators are
 * stateless, so no connect_state is tracked here.
 */
static struct aws_http_proxy_negotiator *s_create_forwarding_identity_negotiator(
    struct aws_http_proxy_strategy *proxy_strategy,
    struct aws_allocator *allocator) {
    if (proxy_strategy == NULL || allocator == NULL) {
        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
        return NULL;
    }

    struct aws_http_proxy_negotiator_forwarding_identity *identity_negotiator =
        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_forwarding_identity));
    if (identity_negotiator == NULL) {
        return NULL;
    }

    identity_negotiator->allocator = allocator;
    identity_negotiator->negotiator_base.impl = identity_negotiator;
    /* ref count starts at 1; destroyed via s_destroy_forwarding_identity_negotiator */
    aws_ref_count_init(
        &identity_negotiator->negotiator_base.ref_count,
        &identity_negotiator->negotiator_base,
        (aws_simple_completion_callback *)s_destroy_forwarding_identity_negotiator);

    identity_negotiator->negotiator_base.strategy_vtable.forwarding_vtable =
        &s_forwarding_identity_proxy_negotiator_tunneling_vtable;

    return &identity_negotiator->negotiator_base;
}
+
/* Strategy vtable: negotiator factory only. */
static struct aws_http_proxy_strategy_vtable s_forwarding_identity_strategy_vtable = {
    .create_negotiator = s_create_forwarding_identity_negotiator,
};
+
+static void s_destroy_forwarding_identity_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+ struct aws_http_proxy_strategy_forwarding_identity *identity_strategy = proxy_strategy->impl;
+
+ aws_mem_release(identity_strategy->allocator, identity_strategy);
+}
+
/*
 * Public factory for a forwarding proxy strategy that leaves requests
 * untouched.  Returns NULL with an error raised on invalid arguments or
 * allocation failure.
 */
struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_forwarding_identity(struct aws_allocator *allocator) {
    if (allocator == NULL) {
        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
        return NULL;
    }

    struct aws_http_proxy_strategy_forwarding_identity *identity_strategy =
        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_forwarding_identity));
    if (identity_strategy == NULL) {
        return NULL;
    }

    identity_strategy->strategy_base.impl = identity_strategy;
    identity_strategy->strategy_base.vtable = &s_forwarding_identity_strategy_vtable;
    identity_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_FORWARD;
    identity_strategy->allocator = allocator;

    /* caller owns the initial reference */
    aws_ref_count_init(
        &identity_strategy->strategy_base.ref_count,
        &identity_strategy->strategy_base,
        (aws_simple_completion_callback *)s_destroy_forwarding_identity_strategy);

    return &identity_strategy->strategy_base;
}
+
+/******************************************************************************************************************/
+/* kerberos */
+
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_authorization_header_kerberos_prefix, "Negotiate ");
+
/* Strategy configuration for kerberos ("Negotiate") proxy authentication over CONNECT. */
struct aws_http_proxy_strategy_tunneling_kerberos {
    struct aws_allocator *allocator;

    /* user callback that synchronously produces a base64-encoded kerberos token */
    aws_http_proxy_negotiation_get_token_sync_fn *get_token;

    void *get_token_user_data;

    struct aws_http_proxy_strategy strategy_base;
};
+
/* Per-connection negotiator state for the kerberos strategy. */
struct aws_http_proxy_negotiator_tunneling_kerberos {
    struct aws_allocator *allocator;

    /* keeps the owning strategy (and its token callback) alive */
    struct aws_http_proxy_strategy *strategy;

    enum proxy_negotiator_connect_state connect_state;

    /*
     * ToDo: make adaptive and add any state needed here
     *
     * Likely things include response code (from the vanilla CONNECT) and the appropriate headers in
     * the response
     */

    struct aws_http_proxy_negotiator negotiator_base;
};
+
/*
 * Adds a proxy authentication header based on the user kerberos authentication token:
 * "Proxy-Authorization: Negotiate <token>".
 * This uses a token that is already base64 encoded; no encoding happens here.
 * Returns AWS_OP_SUCCESS/AWS_OP_ERR.
 */
static int s_add_kerberos_proxy_usertoken_authentication_header(
    struct aws_allocator *allocator,
    struct aws_http_message *request,
    struct aws_byte_cursor user_token) {

    struct aws_byte_buf header_value;
    AWS_ZERO_STRUCT(header_value);

    int result = AWS_OP_ERR;

    /* exact capacity: "Negotiate " prefix + token */
    if (aws_byte_buf_init(
            &header_value, allocator, s_proxy_authorization_header_kerberos_prefix->len + user_token.len)) {
        goto done;
    }

    /* First append proxy authorization header kerberos prefix */
    struct aws_byte_cursor auth_header_cursor =
        aws_byte_cursor_from_string(s_proxy_authorization_header_kerberos_prefix);
    if (aws_byte_buf_append(&header_value, &auth_header_cursor)) {
        goto done;
    }

    /* Append token to it */
    if (aws_byte_buf_append(&header_value, &user_token)) {
        goto done;
    }

    /* the cursor aliases header_value; aws_http_message_add_header copies it into the message */
    struct aws_http_header header = {
        .name = aws_byte_cursor_from_string(s_proxy_authorization_header_name),
        .value = aws_byte_cursor_from_array(header_value.buffer, header_value.len),
    };

    if (aws_http_message_add_header(request, header)) {
        goto done;
    }

    result = AWS_OP_SUCCESS;

done:

    aws_byte_buf_clean_up(&header_value);
    return result;
}
+
+static void s_kerberos_tunnel_transform_connect(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message,
+ aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+ aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+ void *internal_proxy_user_data) {
+ struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl;
+ struct aws_http_proxy_strategy_tunneling_kerberos *kerberos_strategy = kerberos_negotiator->strategy->impl;
+
+ int result = AWS_OP_ERR;
+ int error_code = AWS_ERROR_SUCCESS;
+ struct aws_string *kerberos_token = NULL;
+
+ if (kerberos_negotiator->connect_state == AWS_PNCS_FAILURE) {
+ error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED;
+ goto done;
+ }
+
+ if (kerberos_negotiator->connect_state != AWS_PNCS_READY) {
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto done;
+ }
+
+ kerberos_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+
+ kerberos_token = kerberos_strategy->get_token(kerberos_strategy->get_token_user_data, &error_code);
+ if (kerberos_token == NULL || error_code != AWS_ERROR_SUCCESS) {
+ goto done;
+ }
+
+ /*transform the header with proxy authenticate:Negotiate and kerberos token*/
+ if (s_add_kerberos_proxy_usertoken_authentication_header(
+ kerberos_negotiator->allocator, message, aws_byte_cursor_from_string(kerberos_token))) {
+ error_code = aws_last_error();
+ goto done;
+ }
+
+ kerberos_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result != AWS_OP_SUCCESS) {
+ if (error_code == AWS_ERROR_SUCCESS) {
+ error_code = AWS_ERROR_UNKNOWN;
+ }
+ negotiation_termination_callback(message, error_code, internal_proxy_user_data);
+ } else {
+ negotiation_http_request_forward_callback(message, internal_proxy_user_data);
+ }
+
+ aws_string_destroy(kerberos_token);
+}
+
+static int s_kerberos_on_incoming_header_adaptive(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers) {
+
+ struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl;
+ (void)kerberos_negotiator;
+ (void)header_block;
+ (void)header_array;
+ (void)num_headers;
+
+ /* TODO: process vanilla CONNECT response headers here to improve usage/application */
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_kerberos_on_connect_status(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_status_code status_code) {
+
+ struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl;
+
+ /* TODO: process status code of vanilla CONNECT request here to improve usage/application */
+
+ if (kerberos_negotiator->connect_state == AWS_PNCS_IN_PROGRESS) {
+ if (AWS_HTTP_STATUS_CODE_200_OK != status_code) {
+ kerberos_negotiator->connect_state = AWS_PNCS_FAILURE;
+ } else {
+ kerberos_negotiator->connect_state = AWS_PNCS_SUCCESS;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_kerberos_on_incoming_body(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ const struct aws_byte_cursor *data) {
+
+ struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl;
+ (void)kerberos_negotiator;
+ (void)data;
+
+ return AWS_OP_SUCCESS;
+}
+
/* Tunnelling vtable for kerberos: full callback set (body/header hooks are currently no-ops). */
static struct aws_http_proxy_negotiator_tunnelling_vtable s_tunneling_kerberos_proxy_negotiator_tunneling_vtable = {
    .on_incoming_body_callback = s_kerberos_on_incoming_body,
    .on_incoming_headers_callback = s_kerberos_on_incoming_header_adaptive,
    .on_status_callback = s_kerberos_on_connect_status,
    .connect_request_transform = s_kerberos_tunnel_transform_connect,
};
+
+static void s_destroy_tunneling_kerberos_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+ struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl;
+
+ aws_http_proxy_strategy_release(kerberos_negotiator->strategy);
+
+ aws_mem_release(kerberos_negotiator->allocator, kerberos_negotiator);
+}
+
/*
 * Creates a kerberos negotiator.  Tunnelling-only; holds a reference on the
 * strategy so the token callback stays valid for the negotiator's lifetime.
 */
static struct aws_http_proxy_negotiator *s_create_tunneling_kerberos_negotiator(
    struct aws_http_proxy_strategy *proxy_strategy,
    struct aws_allocator *allocator) {
    if (proxy_strategy == NULL || allocator == NULL) {
        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
        return NULL;
    }

    struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator =
        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_tunneling_kerberos));
    if (kerberos_negotiator == NULL) {
        return NULL;
    }

    /* calloc leaves connect_state == AWS_PNCS_READY (0) */
    kerberos_negotiator->allocator = allocator;
    kerberos_negotiator->negotiator_base.impl = kerberos_negotiator;
    aws_ref_count_init(
        &kerberos_negotiator->negotiator_base.ref_count,
        &kerberos_negotiator->negotiator_base,
        (aws_simple_completion_callback *)s_destroy_tunneling_kerberos_negotiator);

    kerberos_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable =
        &s_tunneling_kerberos_proxy_negotiator_tunneling_vtable;

    kerberos_negotiator->strategy = aws_http_proxy_strategy_acquire(proxy_strategy);

    return &kerberos_negotiator->negotiator_base;
}
+
/* Strategy vtable: negotiator factory only. */
static struct aws_http_proxy_strategy_vtable s_tunneling_kerberos_strategy_vtable = {
    .create_negotiator = s_create_tunneling_kerberos_negotiator,
};
+
+static void s_destroy_tunneling_kerberos_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+ struct aws_http_proxy_strategy_tunneling_kerberos *kerberos_strategy = proxy_strategy->impl;
+
+ aws_mem_release(kerberos_strategy->allocator, kerberos_strategy);
+}
+
/*
 * Public factory for a kerberos tunnelling proxy strategy.  Requires a token
 * callback in the config; the callback pointer and its user data are copied by
 * value, so the config itself need not outlive this call.  Returns NULL with
 * an error raised on invalid arguments or allocation failure.
 */
struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_kerberos(
    struct aws_allocator *allocator,
    struct aws_http_proxy_strategy_tunneling_kerberos_options *config) {

    if (allocator == NULL || config == NULL || config->get_token == NULL) {
        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
        return NULL;
    }

    struct aws_http_proxy_strategy_tunneling_kerberos *kerberos_strategy =
        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_tunneling_kerberos));
    if (kerberos_strategy == NULL) {
        return NULL;
    }

    kerberos_strategy->strategy_base.impl = kerberos_strategy;
    kerberos_strategy->strategy_base.vtable = &s_tunneling_kerberos_strategy_vtable;
    kerberos_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL;
    kerberos_strategy->allocator = allocator;

    /* caller owns the initial reference */
    aws_ref_count_init(
        &kerberos_strategy->strategy_base.ref_count,
        &kerberos_strategy->strategy_base,
        (aws_simple_completion_callback *)s_destroy_tunneling_kerberos_strategy);

    kerberos_strategy->get_token = config->get_token;
    kerberos_strategy->get_token_user_data = config->get_token_user_data;

    return &kerberos_strategy->strategy_base;
}
+
+/******************************************************************************************************************/
+
/* Strategy configuration for NTLM (challenge/response) proxy authentication over CONNECT. */
struct aws_http_proxy_strategy_tunneling_ntlm {
    struct aws_allocator *allocator;

    /* NOTE(review): not used by the NTLM transform visible in this file — presumably
     * used by an initial-token phase elsewhere; confirm before removing */
    aws_http_proxy_negotiation_get_token_sync_fn *get_token;

    /* user callback producing the (already base64-encoded) answer to the proxy's NTLM challenge */
    aws_http_proxy_negotiation_get_challenge_token_sync_fn *get_challenge_token;

    void *get_challenge_token_user_data;

    struct aws_http_proxy_strategy strategy_base;
};
+
/* Per-connection negotiator state for the NTLM strategy. */
struct aws_http_proxy_negotiator_tunneling_ntlm {
    struct aws_allocator *allocator;

    /* keeps the owning strategy (and its callbacks) alive */
    struct aws_http_proxy_strategy *strategy;

    enum proxy_negotiator_connect_state connect_state;

    /* challenge extracted from the proxy's Proxy-Authenticate header; must be
     * present before the transform below can run */
    struct aws_string *challenge_token;

    struct aws_http_proxy_negotiator negotiator_base;
};
+
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_authorization_header_ntlm_prefix, "NTLM ");
+
/*
 * Adds a proxy authentication header based on ntlm credential or response provided by user:
 * "Proxy-Authorization: NTLM <response>".
 * Returns AWS_OP_SUCCESS/AWS_OP_ERR.
 */
static int s_add_ntlm_proxy_usertoken_authentication_header(
    struct aws_allocator *allocator,
    struct aws_http_message *request,
    struct aws_byte_cursor credential_response) {

    struct aws_byte_buf header_value;
    AWS_ZERO_STRUCT(header_value);

    int result = AWS_OP_ERR;

    /* exact capacity: "NTLM " prefix + response */
    if (aws_byte_buf_init(
            &header_value, allocator, s_proxy_authorization_header_ntlm_prefix->len + credential_response.len)) {
        goto done;
    }

    /* First append proxy authorization header prefix */
    struct aws_byte_cursor auth_header_cursor = aws_byte_cursor_from_string(s_proxy_authorization_header_ntlm_prefix);
    if (aws_byte_buf_append(&header_value, &auth_header_cursor)) {
        goto done;
    }

    /* Append the credential response to it; assumes already encoded properly (base64) */
    if (aws_byte_buf_append(&header_value, &credential_response)) {
        goto done;
    }

    /* the cursor aliases header_value; aws_http_message_add_header copies it into the message */
    struct aws_http_header header = {
        .name = aws_byte_cursor_from_string(s_proxy_authorization_header_name),
        .value = aws_byte_cursor_from_array(header_value.buffer, header_value.len),
    };

    if (aws_http_message_add_header(request, header)) {
        goto done;
    }

    result = AWS_OP_SUCCESS;

done:

    aws_byte_buf_clean_up(&header_value);
    return result;
}
+
+/*
+ * CONNECT request transform for the NTLM challenge-response step: asks the user-supplied
+ * get_challenge_token callback for an answer to the previously captured Proxy-Authenticate
+ * challenge and attaches it as a Proxy-Authorization header.  Exactly one of the two
+ * callbacks is invoked: forward on success, terminate (with an error code) on failure.
+ */
+static void s_ntlm_tunnel_transform_connect(
+    struct aws_http_proxy_negotiator *proxy_negotiator,
+    struct aws_http_message *message,
+    aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+    aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+    void *internal_proxy_user_data) {
+
+    struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = proxy_negotiator->impl;
+    struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_strategy = ntlm_negotiator->strategy->impl;
+
+    int result = AWS_OP_ERR;
+    int error_code = AWS_ERROR_SUCCESS;
+    struct aws_string *challenge_answer_token = NULL;
+    struct aws_byte_cursor challenge_token;
+    AWS_ZERO_STRUCT(challenge_token);
+
+    /* A previous CONNECT on this negotiator already failed; don't retry */
+    if (ntlm_negotiator->connect_state == AWS_PNCS_FAILURE) {
+        error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED;
+        goto done;
+    }
+
+    if (ntlm_negotiator->connect_state != AWS_PNCS_READY) {
+        error_code = AWS_ERROR_INVALID_STATE;
+        goto done;
+    }
+
+    /* This step requires a challenge captured from a prior 407 response's Proxy-Authenticate header */
+    if (ntlm_negotiator->challenge_token == NULL) {
+        error_code = AWS_ERROR_HTTP_PROXY_STRATEGY_NTLM_CHALLENGE_TOKEN_MISSING;
+        goto done;
+    }
+
+    ntlm_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+    challenge_token = aws_byte_cursor_from_string(ntlm_negotiator->challenge_token);
+    challenge_answer_token =
+        ntlm_strategy->get_challenge_token(ntlm_strategy->get_challenge_token_user_data, &challenge_token, &error_code);
+
+    if (challenge_answer_token == NULL || error_code != AWS_ERROR_SUCCESS) {
+        goto done;
+    }
+
+    /* Attach the Proxy-Authorization header carrying the NTLM challenge-response token */
+    if (s_add_ntlm_proxy_usertoken_authentication_header(
+            ntlm_negotiator->allocator, message, aws_byte_cursor_from_string(challenge_answer_token))) {
+        error_code = aws_last_error();
+        goto done;
+    }
+
+    /* NOTE(review): connect_state was already set to AWS_PNCS_IN_PROGRESS above; this assignment is redundant */
+    ntlm_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+    result = AWS_OP_SUCCESS;
+
+done:
+
+    if (result != AWS_OP_SUCCESS) {
+        if (error_code == AWS_ERROR_SUCCESS) {
+            error_code = AWS_ERROR_UNKNOWN;
+        }
+        negotiation_termination_callback(message, error_code, internal_proxy_user_data);
+    } else {
+        negotiation_http_request_forward_callback(message, internal_proxy_user_data);
+    }
+
+    aws_string_destroy(challenge_answer_token);
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_ntlm_challenge_token_header, "Proxy-Authenticate");
+
+/*
+ * Captures the NTLM challenge token from a CONNECT response's Proxy-Authenticate header
+ * so a later CONNECT attempt can answer it.  Only records while in the READY state.
+ */
+static int s_ntlm_on_incoming_header_adaptive(
+    struct aws_http_proxy_negotiator *proxy_negotiator,
+    enum aws_http_header_block header_block,
+    const struct aws_http_header *header_array,
+    size_t num_headers) {
+
+    struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = proxy_negotiator->impl;
+
+    /*
+     * only extract the challenge before we've started our own CONNECT attempt
+     *
+     * ToDo: we currently overwrite previous challenge tokens since it is unknown if multiple CONNECT requests
+     * cause new challenges to be issued such that old challenges become invalid even if successfully computed
+     */
+    if (ntlm_negotiator->connect_state == AWS_PNCS_READY) {
+        if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) {
+            struct aws_byte_cursor proxy_authenticate_header_name =
+                aws_byte_cursor_from_string(s_ntlm_challenge_token_header);
+            for (size_t i = 0; i < num_headers; ++i) {
+                struct aws_byte_cursor header_name_cursor = header_array[i].name;
+                if (aws_byte_cursor_eq_ignore_case(&proxy_authenticate_header_name, &header_name_cursor)) {
+                    /* Replace any previously stored challenge (destroy is NULL-safe) */
+                    aws_string_destroy(ntlm_negotiator->challenge_token);
+
+                    struct aws_byte_cursor challenge_value_cursor = header_array[i].value;
+                    ntlm_negotiator->challenge_token =
+                        aws_string_new_from_cursor(ntlm_negotiator->allocator, &challenge_value_cursor);
+                    break;
+                }
+            }
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Records the terminal outcome of a CONNECT attempt this negotiator initiated. */
+static int s_ntlm_on_connect_status(
+    struct aws_http_proxy_negotiator *proxy_negotiator,
+    enum aws_http_status_code status_code) {
+
+    struct aws_http_proxy_negotiator_tunneling_ntlm *negotiator_impl = proxy_negotiator->impl;
+
+    /* Only a CONNECT we actually started can transition to a terminal state. */
+    if (negotiator_impl->connect_state == AWS_PNCS_IN_PROGRESS) {
+        negotiator_impl->connect_state =
+            (status_code == AWS_HTTP_STATUS_CODE_200_OK) ? AWS_PNCS_SUCCESS : AWS_PNCS_FAILURE;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* NTLM negotiation ignores CONNECT response body data. */
+static int s_ntlm_on_incoming_body(
+    struct aws_http_proxy_negotiator *proxy_negotiator,
+    const struct aws_byte_cursor *data) {
+
+    (void)proxy_negotiator;
+    (void)data;
+
+    return AWS_OP_SUCCESS;
+}
+
+/* NTLM challenge-response must reuse the same connection the challenge was issued on. */
+static enum aws_http_proxy_negotiation_retry_directive s_ntlm_tunnel_get_retry_directive(
+    struct aws_http_proxy_negotiator *proxy_negotiator) {
+    (void)proxy_negotiator;
+
+    return AWS_HPNRD_CURRENT_CONNECTION;
+}
+
+/* vtable for the NTLM challenge-response negotiation step */
+static struct aws_http_proxy_negotiator_tunnelling_vtable s_tunneling_ntlm_proxy_negotiator_tunneling_vtable = {
+    .on_incoming_body_callback = s_ntlm_on_incoming_body,
+    .on_incoming_headers_callback = s_ntlm_on_incoming_header_adaptive,
+    .on_status_callback = s_ntlm_on_connect_status,
+    .connect_request_transform = s_ntlm_tunnel_transform_connect,
+    .get_retry_directive = s_ntlm_tunnel_get_retry_directive,
+};
+
+/* Ref-count zero callback: frees the stored challenge token, drops the strategy ref, frees the negotiator. */
+static void s_destroy_tunneling_ntlm_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+    struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = proxy_negotiator->impl;
+
+    aws_string_destroy(ntlm_negotiator->challenge_token);
+    aws_http_proxy_strategy_release(ntlm_negotiator->strategy);
+
+    aws_mem_release(ntlm_negotiator->allocator, ntlm_negotiator);
+}
+
+/*
+ * Factory for an NTLM challenge-response negotiator.  The negotiator keeps a reference
+ * to the owning strategy so the user callbacks stay valid for its lifetime.
+ * Returns NULL on invalid arguments or allocation failure.
+ */
+static struct aws_http_proxy_negotiator *s_create_tunneling_ntlm_negotiator(
+    struct aws_http_proxy_strategy *proxy_strategy,
+    struct aws_allocator *allocator) {
+    if (proxy_strategy == NULL || allocator == NULL) {
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator =
+        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_tunneling_ntlm));
+    if (ntlm_negotiator == NULL) {
+        return NULL;
+    }
+
+    ntlm_negotiator->allocator = allocator;
+    ntlm_negotiator->negotiator_base.impl = ntlm_negotiator;
+    aws_ref_count_init(
+        &ntlm_negotiator->negotiator_base.ref_count,
+        &ntlm_negotiator->negotiator_base,
+        (aws_simple_completion_callback *)s_destroy_tunneling_ntlm_negotiator);
+
+    ntlm_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable =
+        &s_tunneling_ntlm_proxy_negotiator_tunneling_vtable;
+
+    /* Hold the strategy so its callback configuration outlives callers releasing it */
+    ntlm_negotiator->strategy = aws_http_proxy_strategy_acquire(proxy_strategy);
+
+    return &ntlm_negotiator->negotiator_base;
+}
+
+/* strategy vtable: NTLM challenge-response step */
+static struct aws_http_proxy_strategy_vtable s_tunneling_ntlm_strategy_vtable = {
+    .create_negotiator = s_create_tunneling_ntlm_negotiator,
+};
+
+/* Ref-count zero callback for the NTLM strategy. */
+static void s_destroy_tunneling_ntlm_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+    struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_strategy = proxy_strategy->impl;
+
+    aws_mem_release(ntlm_strategy->allocator, ntlm_strategy);
+}
+
+/*
+ * Public constructor for the NTLM challenge-response proxy strategy (tunneling only).
+ * Requires config->get_challenge_token; returns a new strategy with refcount 1, or NULL.
+ */
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_ntlm(
+    struct aws_allocator *allocator,
+    struct aws_http_proxy_strategy_tunneling_ntlm_options *config) {
+
+    if (allocator == NULL || config == NULL || config->get_challenge_token == NULL) {
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_strategy =
+        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_tunneling_ntlm));
+    if (ntlm_strategy == NULL) {
+        return NULL;
+    }
+
+    ntlm_strategy->strategy_base.impl = ntlm_strategy;
+    ntlm_strategy->strategy_base.vtable = &s_tunneling_ntlm_strategy_vtable;
+    ntlm_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL;
+
+    ntlm_strategy->allocator = allocator;
+
+    aws_ref_count_init(
+        &ntlm_strategy->strategy_base.ref_count,
+        &ntlm_strategy->strategy_base,
+        (aws_simple_completion_callback *)s_destroy_tunneling_ntlm_strategy);
+
+    ntlm_strategy->get_challenge_token = config->get_challenge_token;
+    ntlm_strategy->get_challenge_token_user_data = config->get_challenge_token_user_data;
+
+    return &ntlm_strategy->strategy_base;
+}
+/******************************************************************************************************/
+
+/*
+ * CONNECT request transform for the NTLM *credential* (first/negotiate) step: asks the
+ * user-supplied get_token callback for an initial token (no challenge yet) and attaches
+ * it as a Proxy-Authorization header.  Exactly one of the two callbacks is invoked.
+ */
+static void s_ntlm_credential_tunnel_transform_connect(
+    struct aws_http_proxy_negotiator *proxy_negotiator,
+    struct aws_http_message *message,
+    aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+    aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+    void *internal_proxy_user_data) {
+
+    struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_credential_negotiator = proxy_negotiator->impl;
+    struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_credential_strategy =
+        ntlm_credential_negotiator->strategy->impl;
+
+    int result = AWS_OP_ERR;
+    int error_code = AWS_ERROR_SUCCESS;
+    struct aws_string *token = NULL;
+
+    if (ntlm_credential_negotiator->connect_state == AWS_PNCS_FAILURE) {
+        error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED;
+        goto done;
+    }
+
+    if (ntlm_credential_negotiator->connect_state != AWS_PNCS_READY) {
+        error_code = AWS_ERROR_INVALID_STATE;
+        goto done;
+    }
+
+    ntlm_credential_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+    token = ntlm_credential_strategy->get_token(ntlm_credential_strategy->get_challenge_token_user_data, &error_code);
+
+    if (token == NULL || error_code != AWS_ERROR_SUCCESS) {
+        goto done;
+    }
+
+    /* Attach the Proxy-Authorization header carrying the initial NTLM token */
+    if (s_add_ntlm_proxy_usertoken_authentication_header(
+            ntlm_credential_negotiator->allocator, message, aws_byte_cursor_from_string(token))) {
+        error_code = aws_last_error();
+        goto done;
+    }
+
+    /* NOTE(review): connect_state was already set to AWS_PNCS_IN_PROGRESS above; this assignment is redundant */
+    ntlm_credential_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+    result = AWS_OP_SUCCESS;
+
+done:
+
+    if (result != AWS_OP_SUCCESS) {
+        if (error_code == AWS_ERROR_SUCCESS) {
+            error_code = AWS_ERROR_UNKNOWN;
+        }
+        negotiation_termination_callback(message, error_code, internal_proxy_user_data);
+    } else {
+        negotiation_http_request_forward_callback(message, internal_proxy_user_data);
+    }
+
+    aws_string_destroy(token);
+}
+
+/*
+ * vtable for the NTLM credential (first) step.
+ * NOTE(review): unlike the challenge-response vtable, .get_retry_directive is intentionally
+ * left NULL here — presumably the framework default applies; confirm against the negotiator core.
+ */
+static struct aws_http_proxy_negotiator_tunnelling_vtable
+    s_tunneling_ntlm_proxy_credential_negotiator_tunneling_vtable = {
+        .on_incoming_body_callback = s_ntlm_on_incoming_body,
+        .on_incoming_headers_callback = s_ntlm_on_incoming_header_adaptive,
+        .on_status_callback = s_ntlm_on_connect_status,
+        .connect_request_transform = s_ntlm_credential_tunnel_transform_connect,
+};
+
+/* Ref-count zero callback for the NTLM credential negotiator. */
+static void s_destroy_tunneling_ntlm_credential_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+    struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_credential_negotiator = proxy_negotiator->impl;
+
+    aws_string_destroy(ntlm_credential_negotiator->challenge_token);
+    aws_http_proxy_strategy_release(ntlm_credential_negotiator->strategy);
+
+    aws_mem_release(ntlm_credential_negotiator->allocator, ntlm_credential_negotiator);
+}
+
+/*
+ * Factory for the NTLM credential (first-step) negotiator; mirrors
+ * s_create_tunneling_ntlm_negotiator but wires the credential vtable.
+ */
+static struct aws_http_proxy_negotiator *s_create_tunneling_ntlm_credential_negotiator(
+    struct aws_http_proxy_strategy *proxy_strategy,
+    struct aws_allocator *allocator) {
+    if (proxy_strategy == NULL || allocator == NULL) {
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_credential_negotiator =
+        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_tunneling_ntlm));
+    if (ntlm_credential_negotiator == NULL) {
+        return NULL;
+    }
+
+    ntlm_credential_negotiator->allocator = allocator;
+    ntlm_credential_negotiator->negotiator_base.impl = ntlm_credential_negotiator;
+    aws_ref_count_init(
+        &ntlm_credential_negotiator->negotiator_base.ref_count,
+        &ntlm_credential_negotiator->negotiator_base,
+        (aws_simple_completion_callback *)s_destroy_tunneling_ntlm_credential_negotiator);
+
+    ntlm_credential_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable =
+        &s_tunneling_ntlm_proxy_credential_negotiator_tunneling_vtable;
+
+    ntlm_credential_negotiator->strategy = aws_http_proxy_strategy_acquire(proxy_strategy);
+
+    return &ntlm_credential_negotiator->negotiator_base;
+}
+
+/* strategy vtable: NTLM credential (first) step */
+static struct aws_http_proxy_strategy_vtable s_tunneling_ntlm_credential_strategy_vtable = {
+    .create_negotiator = s_create_tunneling_ntlm_credential_negotiator,
+};
+
+/* Ref-count zero callback for the NTLM credential strategy. */
+static void s_destroy_tunneling_ntlm_credential_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+    struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_credential_strategy = proxy_strategy->impl;
+
+    aws_mem_release(ntlm_credential_strategy->allocator, ntlm_credential_strategy);
+}
+
+/*
+ * Public constructor for the NTLM credential (first-step) proxy strategy.
+ * Requires config->get_token; returns a new strategy with refcount 1, or NULL.
+ */
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_ntlm_credential(
+    struct aws_allocator *allocator,
+    struct aws_http_proxy_strategy_tunneling_ntlm_options *config) {
+
+    if (allocator == NULL || config == NULL || config->get_token == NULL) {
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_credential_strategy =
+        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_tunneling_ntlm));
+    if (ntlm_credential_strategy == NULL) {
+        return NULL;
+    }
+
+    ntlm_credential_strategy->strategy_base.impl = ntlm_credential_strategy;
+    ntlm_credential_strategy->strategy_base.vtable = &s_tunneling_ntlm_credential_strategy_vtable;
+    ntlm_credential_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL;
+
+    ntlm_credential_strategy->allocator = allocator;
+
+    aws_ref_count_init(
+        &ntlm_credential_strategy->strategy_base.ref_count,
+        &ntlm_credential_strategy->strategy_base,
+        (aws_simple_completion_callback *)s_destroy_tunneling_ntlm_credential_strategy);
+
+    ntlm_credential_strategy->get_token = config->get_token;
+    ntlm_credential_strategy->get_challenge_token_user_data = config->get_challenge_token_user_data;
+
+    return &ntlm_credential_strategy->strategy_base;
+}
+
+/******************************************************************************************************************/
+
+#define PROXY_STRATEGY_MAX_ADAPTIVE_STRATEGIES 4
+
+/*
+ * Builds the adaptive tunneling strategy: a sequence of [identity, kerberos?, ntlm-credential?,
+ * ntlm-challenge?] sub-strategies tried in order.  The sequence strategy acquires its own
+ * references to the sub-strategies, so all locally created ones are released unconditionally
+ * at 'done' (release is NULL-safe).  Returns NULL if any sub-strategy fails to construct.
+ */
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_adaptive(
+    struct aws_allocator *allocator,
+    struct aws_http_proxy_strategy_tunneling_adaptive_options *config) {
+
+    if (allocator == NULL || config == NULL) {
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    struct aws_http_proxy_strategy *strategies[PROXY_STRATEGY_MAX_ADAPTIVE_STRATEGIES];
+
+    uint32_t strategy_count = 0;
+    struct aws_http_proxy_strategy *identity_strategy = NULL;
+    struct aws_http_proxy_strategy *kerberos_strategy = NULL;
+    struct aws_http_proxy_strategy *ntlm_credential_strategy = NULL;
+    struct aws_http_proxy_strategy *ntlm_strategy = NULL;
+    struct aws_http_proxy_strategy *adaptive_sequence_strategy = NULL;
+
+    /* Always try a plain (unauthenticated) CONNECT first */
+    identity_strategy = aws_http_proxy_strategy_new_tunneling_one_time_identity(allocator);
+    if (identity_strategy == NULL) {
+        goto done;
+    }
+    strategies[strategy_count++] = identity_strategy;
+
+    if (config->kerberos_options != NULL) {
+        kerberos_strategy = aws_http_proxy_strategy_new_tunneling_kerberos(allocator, config->kerberos_options);
+        if (kerberos_strategy == NULL) {
+            goto done;
+        }
+
+        strategies[strategy_count++] = kerberos_strategy;
+    }
+
+    if (config->ntlm_options != NULL) {
+        /* NTLM is two steps: credential (negotiate) first, then challenge-response */
+        ntlm_credential_strategy =
+            aws_http_proxy_strategy_new_tunneling_ntlm_credential(allocator, config->ntlm_options);
+        if (ntlm_credential_strategy == NULL) {
+            goto done;
+        }
+
+        strategies[strategy_count++] = ntlm_credential_strategy;
+
+        ntlm_strategy = aws_http_proxy_strategy_new_tunneling_ntlm(allocator, config->ntlm_options);
+        if (ntlm_strategy == NULL) {
+            goto done;
+        }
+
+        strategies[strategy_count++] = ntlm_strategy;
+    }
+
+    AWS_FATAL_ASSERT(strategy_count <= PROXY_STRATEGY_MAX_ADAPTIVE_STRATEGIES);
+
+    struct aws_http_proxy_strategy_tunneling_sequence_options sequence_config = {
+        .strategies = strategies,
+        .strategy_count = strategy_count,
+    };
+
+    adaptive_sequence_strategy = aws_http_proxy_strategy_new_tunneling_sequence(allocator, &sequence_config);
+    if (adaptive_sequence_strategy == NULL) {
+        goto done;
+    }
+
+done:
+
+    /* Sequence (if created) holds its own refs; these local refs are dropped either way */
+    aws_http_proxy_strategy_release(identity_strategy);
+    aws_http_proxy_strategy_release(kerberos_strategy);
+    aws_http_proxy_strategy_release(ntlm_credential_strategy);
+    aws_http_proxy_strategy_release(ntlm_strategy);
+
+    return adaptive_sequence_strategy;
+}
+
+/******************************************************************************************************************/
+
+/* Strategy that tries a list of sub-strategies in order until one succeeds. */
+struct aws_http_proxy_strategy_tunneling_sequence {
+    struct aws_allocator *allocator;
+
+    /* List of struct aws_http_proxy_strategy *, each with a reference held */
+    struct aws_array_list strategies;
+
+    struct aws_http_proxy_strategy strategy_base;
+};
+
+/* Negotiator that delegates CONNECT attempts to child negotiators one at a time. */
+struct aws_http_proxy_negotiator_tunneling_sequence {
+    struct aws_allocator *allocator;
+
+    /* List of struct aws_http_proxy_negotiator *, one per sub-strategy */
+    struct aws_array_list negotiators;
+    /* Index of the next child to try; incremented on each transform attempt */
+    size_t current_negotiator_transform_index;
+    /* Outer caller's callbacks/user data, saved so per-child callbacks can be interposed */
+    void *original_internal_proxy_user_data;
+    aws_http_proxy_negotiation_terminate_fn *original_negotiation_termination_callback;
+    aws_http_proxy_negotiation_http_request_forward_fn *original_negotiation_http_request_forward_callback;
+
+    struct aws_http_proxy_negotiator negotiator_base;
+};
+
+/*
+ * Termination callback interposed on each child negotiator: logs the child's failure and
+ * reports it to the outer caller as retryable while more children remain, terminal otherwise.
+ */
+static void s_sequence_tunnel_iteration_termination_callback(
+    struct aws_http_message *message,
+    int error_code,
+    void *user_data) {
+
+    struct aws_http_proxy_negotiator *proxy_negotiator = user_data;
+    struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+
+    AWS_LOGF_WARN(
+        AWS_LS_HTTP_PROXY_NEGOTIATION,
+        "(id=%p) Proxy negotiation step failed with error %d",
+        (void *)proxy_negotiator,
+        error_code);
+
+    /* index already points past the child that just failed */
+    int connection_error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED_RETRYABLE;
+    if (sequence_negotiator->current_negotiator_transform_index >=
+        aws_array_list_length(&sequence_negotiator->negotiators)) {
+        connection_error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED;
+    }
+
+    sequence_negotiator->original_negotiation_termination_callback(
+        message, connection_error_code, sequence_negotiator->original_internal_proxy_user_data);
+}
+
+/* Forwards a child's successfully-transformed CONNECT request to the outer caller's callback. */
+static void s_sequence_tunnel_iteration_forward_callback(struct aws_http_message *message, void *user_data) {
+    struct aws_http_proxy_negotiator *base_negotiator = user_data;
+    struct aws_http_proxy_negotiator_tunneling_sequence *sequence_impl = base_negotiator->impl;
+
+    sequence_impl->original_negotiation_http_request_forward_callback(
+        message, sequence_impl->original_internal_proxy_user_data);
+}
+
+/*
+ * Advances to the next child negotiator and delegates the CONNECT transform to it,
+ * interposing the sequence's own termination/forward callbacks.  If no child remains
+ * (or lookup fails), terminates the overall negotiation.
+ */
+static void s_sequence_tunnel_try_next_negotiator(
+    struct aws_http_proxy_negotiator *proxy_negotiator,
+    struct aws_http_message *message) {
+    struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+
+    size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators);
+    if (sequence_negotiator->current_negotiator_transform_index >= negotiator_count) {
+        goto on_error;
+    }
+
+    /* Fetch the child and advance the index (post-increment) so failure reporting sees the new position */
+    struct aws_http_proxy_negotiator *current_negotiator = NULL;
+    if (aws_array_list_get_at(
+            &sequence_negotiator->negotiators,
+            &current_negotiator,
+            sequence_negotiator->current_negotiator_transform_index++)) {
+        goto on_error;
+    }
+
+    current_negotiator->strategy_vtable.tunnelling_vtable->connect_request_transform(
+        current_negotiator,
+        message,
+        s_sequence_tunnel_iteration_termination_callback,
+        s_sequence_tunnel_iteration_forward_callback,
+        proxy_negotiator);
+
+    return;
+
+on_error:
+
+    sequence_negotiator->original_negotiation_termination_callback(
+        message, AWS_ERROR_HTTP_PROXY_CONNECT_FAILED, sequence_negotiator->original_internal_proxy_user_data);
+}
+
+/*
+ * Entry point for each CONNECT attempt: saves the outer caller's callbacks/user data,
+ * then hands the request to the next child negotiator in the sequence.
+ */
+static void s_sequence_tunnel_transform_connect(
+    struct aws_http_proxy_negotiator *proxy_negotiator,
+    struct aws_http_message *message,
+    aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+    aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+    void *internal_proxy_user_data) {
+
+    struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+
+    sequence_negotiator->original_internal_proxy_user_data = internal_proxy_user_data;
+    sequence_negotiator->original_negotiation_termination_callback = negotiation_termination_callback;
+    sequence_negotiator->original_negotiation_http_request_forward_callback = negotiation_http_request_forward_callback;
+
+    s_sequence_tunnel_try_next_negotiator(proxy_negotiator, message);
+}
+
+/* Fans incoming CONNECT response headers out to every child negotiator that handles them. */
+static int s_sequence_on_incoming_headers(
+    struct aws_http_proxy_negotiator *proxy_negotiator,
+    enum aws_http_header_block header_block,
+    const struct aws_http_header *header_array,
+    size_t num_headers) {
+
+    struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+    size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators);
+    for (size_t i = 0; i < negotiator_count; ++i) {
+        struct aws_http_proxy_negotiator *negotiator = NULL;
+        if (aws_array_list_get_at(&sequence_negotiator->negotiators, &negotiator, i)) {
+            continue;
+        }
+
+        aws_http_proxy_negotiation_connect_on_incoming_headers_fn *on_incoming_headers =
+            negotiator->strategy_vtable.tunnelling_vtable->on_incoming_headers_callback;
+        if (on_incoming_headers != NULL) {
+            (*on_incoming_headers)(negotiator, header_block, header_array, num_headers);
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Fans the CONNECT response status out to every child negotiator that handles it. */
+static int s_sequence_on_connect_status(
+    struct aws_http_proxy_negotiator *proxy_negotiator,
+    enum aws_http_status_code status_code) {
+
+    struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+    size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators);
+    for (size_t i = 0; i < negotiator_count; ++i) {
+        struct aws_http_proxy_negotiator *negotiator = NULL;
+        if (aws_array_list_get_at(&sequence_negotiator->negotiators, &negotiator, i)) {
+            continue;
+        }
+
+        aws_http_proxy_negotiator_connect_status_fn *on_status =
+            negotiator->strategy_vtable.tunnelling_vtable->on_status_callback;
+        if (on_status != NULL) {
+            (*on_status)(negotiator, status_code);
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Fans CONNECT response body data out to every child negotiator that handles it. */
+static int s_sequence_on_incoming_body(
+    struct aws_http_proxy_negotiator *proxy_negotiator,
+    const struct aws_byte_cursor *data) {
+
+    struct aws_http_proxy_negotiator_tunneling_sequence *sequence_impl = proxy_negotiator->impl;
+    const size_t child_count = aws_array_list_length(&sequence_impl->negotiators);
+
+    for (size_t idx = 0; idx < child_count; ++idx) {
+        struct aws_http_proxy_negotiator *child = NULL;
+        if (aws_array_list_get_at(&sequence_impl->negotiators, &child, idx)) {
+            continue;
+        }
+
+        aws_http_proxy_negotiator_connect_on_incoming_body_fn *body_fn =
+            child->strategy_vtable.tunnelling_vtable->on_incoming_body_callback;
+        if (body_fn != NULL) {
+            body_fn(child, data);
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/*
+ * Asks the *next* child negotiator (if any) whether the retry should reuse the current
+ * connection or open a new one; returns STOP when the sequence is exhausted.
+ */
+static enum aws_http_proxy_negotiation_retry_directive s_sequence_get_retry_directive(
+    struct aws_http_proxy_negotiator *proxy_negotiator) {
+    struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+
+    if (sequence_negotiator->current_negotiator_transform_index <
+        aws_array_list_length(&sequence_negotiator->negotiators)) {
+        struct aws_http_proxy_negotiator *next_negotiator = NULL;
+        aws_array_list_get_at(
+            &sequence_negotiator->negotiators,
+            &next_negotiator,
+            sequence_negotiator->current_negotiator_transform_index);
+
+        /* Anything other than CURRENT_CONNECTION collapses to NEW_CONNECTION */
+        enum aws_http_proxy_negotiation_retry_directive next_negotiator_directive =
+            aws_http_proxy_negotiator_get_retry_directive(next_negotiator);
+        if (next_negotiator_directive == AWS_HPNRD_CURRENT_CONNECTION) {
+            return AWS_HPNRD_CURRENT_CONNECTION;
+        } else {
+            return AWS_HPNRD_NEW_CONNECTION;
+        }
+    }
+
+    return AWS_HPNRD_STOP;
+}
+
+/* vtable for the sequence negotiator (fans events out to children) */
+static struct aws_http_proxy_negotiator_tunnelling_vtable s_tunneling_sequence_proxy_negotiator_tunneling_vtable = {
+    .on_incoming_body_callback = s_sequence_on_incoming_body,
+    .on_incoming_headers_callback = s_sequence_on_incoming_headers,
+    .on_status_callback = s_sequence_on_connect_status,
+    .connect_request_transform = s_sequence_tunnel_transform_connect,
+    .get_retry_directive = s_sequence_get_retry_directive,
+};
+
+/* Ref-count zero callback: releases every child negotiator, the list, and the negotiator itself. */
+static void s_destroy_tunneling_sequence_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+    struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+
+    size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators);
+    for (size_t i = 0; i < negotiator_count; ++i) {
+        struct aws_http_proxy_negotiator *negotiator = NULL;
+        if (aws_array_list_get_at(&sequence_negotiator->negotiators, &negotiator, i)) {
+            continue;
+        }
+
+        aws_http_proxy_negotiator_release(negotiator);
+    }
+
+    aws_array_list_clean_up(&sequence_negotiator->negotiators);
+
+    aws_mem_release(sequence_negotiator->allocator, sequence_negotiator);
+}
+
+/*
+ * Factory for the sequence negotiator: creates one child negotiator per sub-strategy.
+ * On any failure, releasing the partially-built negotiator (via its destructor) cleans
+ * up all children created so far.
+ */
+static struct aws_http_proxy_negotiator *s_create_tunneling_sequence_negotiator(
+    struct aws_http_proxy_strategy *proxy_strategy,
+    struct aws_allocator *allocator) {
+    if (proxy_strategy == NULL || allocator == NULL) {
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator =
+        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_tunneling_sequence));
+    if (sequence_negotiator == NULL) {
+        return NULL;
+    }
+
+    sequence_negotiator->allocator = allocator;
+    sequence_negotiator->negotiator_base.impl = sequence_negotiator;
+    aws_ref_count_init(
+        &sequence_negotiator->negotiator_base.ref_count,
+        &sequence_negotiator->negotiator_base,
+        (aws_simple_completion_callback *)s_destroy_tunneling_sequence_negotiator);
+
+    sequence_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable =
+        &s_tunneling_sequence_proxy_negotiator_tunneling_vtable;
+
+    struct aws_http_proxy_strategy_tunneling_sequence *sequence_strategy = proxy_strategy->impl;
+    size_t strategy_count = aws_array_list_length(&sequence_strategy->strategies);
+
+    if (aws_array_list_init_dynamic(
+            &sequence_negotiator->negotiators, allocator, strategy_count, sizeof(struct aws_http_proxy_negotiator *))) {
+        goto on_error;
+    }
+
+    for (size_t i = 0; i < strategy_count; ++i) {
+        struct aws_http_proxy_strategy *strategy = NULL;
+        if (aws_array_list_get_at(&sequence_strategy->strategies, &strategy, i)) {
+            goto on_error;
+        }
+
+        struct aws_http_proxy_negotiator *negotiator = aws_http_proxy_strategy_create_negotiator(strategy, allocator);
+        if (negotiator == NULL) {
+            goto on_error;
+        }
+
+        /* If push fails the child isn't tracked by the list yet; release it here */
+        if (aws_array_list_push_back(&sequence_negotiator->negotiators, &negotiator)) {
+            aws_http_proxy_negotiator_release(negotiator);
+            goto on_error;
+        }
+    }
+
+    return &sequence_negotiator->negotiator_base;
+
+on_error:
+
+    aws_http_proxy_negotiator_release(&sequence_negotiator->negotiator_base);
+
+    return NULL;
+}
+
+/* strategy vtable: sequence-of-strategies */
+static struct aws_http_proxy_strategy_vtable s_tunneling_sequence_strategy_vtable = {
+    .create_negotiator = s_create_tunneling_sequence_negotiator,
+};
+
+/* Ref-count zero callback: drops the ref held on each sub-strategy, then frees the sequence. */
+static void s_destroy_tunneling_sequence_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+    struct aws_http_proxy_strategy_tunneling_sequence *sequence_strategy = proxy_strategy->impl;
+
+    size_t strategy_count = aws_array_list_length(&sequence_strategy->strategies);
+    for (size_t i = 0; i < strategy_count; ++i) {
+        struct aws_http_proxy_strategy *strategy = NULL;
+        if (aws_array_list_get_at(&sequence_strategy->strategies, &strategy, i)) {
+            continue;
+        }
+
+        aws_http_proxy_strategy_release(strategy);
+    }
+
+    aws_array_list_clean_up(&sequence_strategy->strategies);
+
+    aws_mem_release(sequence_strategy->allocator, sequence_strategy);
+}
+
+/*
+ * Public constructor for a sequence strategy: copies the caller's strategy pointers and
+ * acquires a reference to each.  On failure, releasing the partially-built strategy
+ * (whose destructor releases whatever was acquired so far) performs cleanup.
+ */
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_sequence(
+    struct aws_allocator *allocator,
+    struct aws_http_proxy_strategy_tunneling_sequence_options *config) {
+
+    if (allocator == NULL || config == NULL) {
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    struct aws_http_proxy_strategy_tunneling_sequence *sequence_strategy =
+        aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_tunneling_sequence));
+    if (sequence_strategy == NULL) {
+        return NULL;
+    }
+
+    sequence_strategy->strategy_base.impl = sequence_strategy;
+    sequence_strategy->strategy_base.vtable = &s_tunneling_sequence_strategy_vtable;
+    sequence_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL;
+    sequence_strategy->allocator = allocator;
+
+    aws_ref_count_init(
+        &sequence_strategy->strategy_base.ref_count,
+        &sequence_strategy->strategy_base,
+        (aws_simple_completion_callback *)s_destroy_tunneling_sequence_strategy);
+
+    if (aws_array_list_init_dynamic(
+            &sequence_strategy->strategies,
+            allocator,
+            config->strategy_count,
+            sizeof(struct aws_http_proxy_strategy *))) {
+        goto on_error;
+    }
+
+    for (size_t i = 0; i < config->strategy_count; ++i) {
+        struct aws_http_proxy_strategy *strategy = config->strategies[i];
+
+        /* Push before acquire: the destructor releases only what made it into the list */
+        if (aws_array_list_push_back(&sequence_strategy->strategies, &strategy)) {
+            goto on_error;
+        }
+
+        aws_http_proxy_strategy_acquire(strategy);
+    }
+
+    return &sequence_strategy->strategy_base;
+
+on_error:
+
+    aws_http_proxy_strategy_release(&sequence_strategy->strategy_base);
+
+    return NULL;
+}
+
+#if defined(_MSC_VER)
+# pragma warning(pop)
+#endif /* _MSC_VER */
diff --git a/contrib/restricted/aws/aws-c-http/source/random_access_set.c b/contrib/restricted/aws/aws-c-http/source/random_access_set.c
new file mode 100644
index 00000000000..20fc12309fa
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/random_access_set.c
@@ -0,0 +1,187 @@
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/allocator.h>
+#include <aws/common/device_random.h>
+#include <aws/http/private/random_access_set.h>
+
+/*
+ * Set with O(1) add/remove/random-access: a list of element pointers plus a map from
+ * element pointer to its current index in the list.
+ */
+struct aws_random_access_set_impl {
+    struct aws_allocator *allocator;
+    struct aws_array_list list; /* Always store the pointer of the element. */
+    struct aws_hash_table map;  /* Map from the element to the index in the array. */
+    aws_hash_callback_destroy_fn *destroy_element_fn; /* Optional; invoked on explicit remove */
+};
+
+/* Tears down both internal containers and frees the impl itself; NULL-safe. */
+static void s_impl_destroy(struct aws_random_access_set_impl *impl) {
+    if (impl == NULL) {
+        return;
+    }
+
+    struct aws_allocator *allocator = impl->allocator;
+    aws_array_list_clean_up(&impl->list);
+    aws_hash_table_clean_up(&impl->map);
+    aws_mem_release(allocator, impl);
+}
+
+/*
+ * Allocates and initializes the impl's list and map; returns NULL on container init failure.
+ * NOTE(review): the calloc result is used without a NULL check — presumably aws_mem_calloc
+ * aborts on OOM in this codebase; confirm against the allocator contract.
+ */
+static struct aws_random_access_set_impl *s_impl_new(
+    struct aws_allocator *allocator,
+    aws_hash_fn *hash_fn,
+    aws_hash_callback_eq_fn *equals_fn,
+    aws_hash_callback_destroy_fn *destroy_element_fn,
+    size_t initial_item_allocation) {
+    struct aws_random_access_set_impl *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_random_access_set_impl));
+    impl->allocator = allocator;
+    /* Will always store the pointer of the element. */
+    if (aws_array_list_init_dynamic(&impl->list, allocator, initial_item_allocation, sizeof(void *))) {
+        s_impl_destroy(impl);
+        return NULL;
+    }
+
+    if (aws_hash_table_init(
+            &impl->map, allocator, initial_item_allocation, hash_fn, equals_fn, destroy_element_fn, NULL)) {
+        s_impl_destroy(impl);
+        return NULL;
+    }
+    impl->destroy_element_fn = destroy_element_fn;
+    return impl;
+}
+
+/*
+ * Initializes a random-access set.  hash_fn/equals_fn operate on element pointers;
+ * destroy_element_fn is optional.  Returns AWS_OP_SUCCESS or AWS_OP_ERR.
+ */
+int aws_random_access_set_init(
+    struct aws_random_access_set *set,
+    struct aws_allocator *allocator,
+    aws_hash_fn *hash_fn,
+    aws_hash_callback_eq_fn *equals_fn,
+    aws_hash_callback_destroy_fn *destroy_element_fn,
+    size_t initial_item_allocation) {
+    AWS_FATAL_PRECONDITION(set);
+    AWS_FATAL_PRECONDITION(allocator);
+    AWS_FATAL_PRECONDITION(hash_fn);
+    AWS_FATAL_PRECONDITION(equals_fn);
+
+    struct aws_random_access_set_impl *impl =
+        s_impl_new(allocator, hash_fn, equals_fn, destroy_element_fn, initial_item_allocation);
+    if (!impl) {
+        return AWS_OP_ERR;
+    }
+    set->impl = impl;
+    return AWS_OP_SUCCESS;
+}
+
+/* Releases all resources held by the set; safe to call with NULL. */
+void aws_random_access_set_clean_up(struct aws_random_access_set *set) {
+    if (set != NULL) {
+        s_impl_destroy(set->impl);
+    }
+}
+
+/*
+ * Adds element to the set if not already present.  *added reports whether an insertion
+ * happened.  Returns AWS_OP_ERR if the existence check or either container mutation fails
+ * (the list push is rolled back if the map put fails, keeping list and map consistent).
+ *
+ * Fix: a failure of the existence check was previously folded into the "already exists"
+ * branch and reported as AWS_OP_SUCCESS, silently swallowing the error.
+ */
+int aws_random_access_set_add(struct aws_random_access_set *set, const void *element, bool *added) {
+    AWS_PRECONDITION(set);
+    AWS_PRECONDITION(element);
+    AWS_PRECONDITION(added);
+    bool exist = false;
+    if (aws_random_access_set_exist(set, element, &exist)) {
+        *added = false;
+        return AWS_OP_ERR;
+    }
+    if (exist) {
+        /* Set semantics: duplicate add is a successful no-op */
+        *added = false;
+        return AWS_OP_SUCCESS;
+    }
+    /* deep copy the pointer of element to store at the array list */
+    if (aws_array_list_push_back(&set->impl->list, (void *)&element)) {
+        goto list_push_error;
+    }
+    /* Map element -> its index in the list; the index is stored directly in the value pointer */
+    if (aws_hash_table_put(&set->impl->map, element, (void *)(aws_array_list_length(&set->impl->list) - 1), NULL)) {
+        goto error;
+    }
+    *added = true;
+    return AWS_OP_SUCCESS;
+error:
+    /* Roll back the list push so both containers stay in sync */
+    aws_array_list_pop_back(&set->impl->list);
+list_push_error:
+    *added = false;
+    return AWS_OP_ERR;
+}
+
+/*
+ * Removes element from the set in O(1) via swap-with-last: the removed slot is filled by
+ * the list's last element, whose map entry is updated to the new index.  Removing an
+ * absent element is a successful no-op.  destroy_element_fn (if set) is invoked last.
+ */
+int aws_random_access_set_remove(struct aws_random_access_set *set, const void *element) {
+    AWS_PRECONDITION(set);
+    AWS_PRECONDITION(element);
+    size_t current_length = aws_array_list_length(&set->impl->list);
+    if (current_length == 0) {
+        /* Nothing to remove */
+        return AWS_OP_SUCCESS;
+    }
+    struct aws_hash_element *find = NULL;
+    /* find and remove the element from table */
+    if (aws_hash_table_find(&set->impl->map, element, &find)) {
+        return AWS_OP_ERR;
+    }
+    if (!find) {
+        /* It's removed already */
+        return AWS_OP_SUCCESS;
+    }
+
+    /* The map value is the element's index in the list, stored directly in the pointer */
+    size_t index_to_remove = (size_t)find->value;
+    if (aws_hash_table_remove_element(&set->impl->map, find)) {
+        return AWS_OP_ERR;
+    }
+    /* If assert code failed, we won't be recovered from the failure */
+    int assert_re = AWS_OP_SUCCESS;
+    (void)assert_re;
+    /* Nothing else can fail after here. */
+    if (index_to_remove != current_length - 1) {
+        /* It's not the last element, we need to swap it with the end of the list and remove the last element */
+        void *last_element = NULL;
+        /* The last element is a pointer of pointer of element. */
+        assert_re = aws_array_list_get_at_ptr(&set->impl->list, &last_element, current_length - 1);
+        AWS_ASSERT(assert_re == AWS_OP_SUCCESS);
+        /* Update the last element index in the table */
+        struct aws_hash_element *element_to_update = NULL;
+        assert_re = aws_hash_table_find(&set->impl->map, *(void **)last_element, &element_to_update);
+        AWS_ASSERT(assert_re == AWS_OP_SUCCESS);
+        AWS_ASSERT(element_to_update != NULL);
+        element_to_update->value = (void *)index_to_remove;
+        /* Swap the last element with the element to remove in the list */
+        aws_array_list_swap(&set->impl->list, index_to_remove, current_length - 1);
+    }
+    /* Remove the current last element from the list */
+    assert_re = aws_array_list_pop_back(&set->impl->list);
+    AWS_ASSERT(assert_re == AWS_OP_SUCCESS);
+    if (set->impl->destroy_element_fn) {
+        set->impl->destroy_element_fn((void *)element);
+    }
+    return AWS_OP_SUCCESS;
+}
+
+/*
+ * Writes a uniformly-random element pointer into *out.  Raises AWS_ERROR_LIST_EMPTY
+ * when the set is empty.
+ * NOTE(review): modulo of a 64-bit random introduces negligible bias for small lengths.
+ */
+int aws_random_access_set_random_get_ptr(const struct aws_random_access_set *set, void **out) {
+    AWS_PRECONDITION(set);
+    AWS_PRECONDITION(out != NULL);
+    size_t length = aws_array_list_length(&set->impl->list);
+    if (length == 0) {
+        return aws_raise_error(AWS_ERROR_LIST_EMPTY);
+    }
+
+    uint64_t random_64_bit_num = 0;
+    aws_device_random_u64(&random_64_bit_num);
+
+    size_t index = (size_t)random_64_bit_num % length;
+    /* The array list stores the pointer of the element. */
+    return aws_array_list_get_at(&set->impl->list, (void *)out, index);
+}
+
+/* Number of elements currently stored in the set. */
+size_t aws_random_access_set_get_size(const struct aws_random_access_set *set) {
+    const struct aws_random_access_set_impl *impl = set->impl;
+    return aws_array_list_length(&impl->list);
+}
+
+/*
+ * Sets *exist to whether element is present.  Returns the hash-table lookup's result;
+ * *exist is false when the lookup fails (find stays NULL).
+ */
+int aws_random_access_set_exist(const struct aws_random_access_set *set, const void *element, bool *exist) {
+    AWS_PRECONDITION(set);
+    AWS_PRECONDITION(element);
+    AWS_PRECONDITION(exist);
+    struct aws_hash_element *find = NULL;
+    int re = aws_hash_table_find(&set->impl->map, element, &find);
+    *exist = find != NULL;
+    return re;
+}
+
+/* Writes the element pointer at a specific list index into *out; errors on out-of-range index. */
+int aws_random_access_set_random_get_ptr_index(const struct aws_random_access_set *set, void **out, size_t index) {
+    AWS_PRECONDITION(set);
+    AWS_PRECONDITION(out != NULL);
+    return aws_array_list_get_at(&set->impl->list, (void *)out, index);
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/request_response.c b/contrib/restricted/aws/aws-c-http/source/request_response.c
new file mode 100644
index 00000000000..c382a3a4d0e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/request_response.c
@@ -0,0 +1,1228 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/array_list.h>
+#include <aws/common/mutex.h>
+#include <aws/common/string.h>
+#include <aws/http/private/connection_impl.h>
+#include <aws/http/private/request_response_impl.h>
+#include <aws/http/private/strutil.h>
+#include <aws/http/server.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+#include <aws/io/stream.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+/* File-local integer constants (enum rather than #define: typed and debugger-visible). */
+enum {
+    /* Initial capacity for the aws_http_message.headers array_list. */
+    AWS_HTTP_REQUEST_NUM_RESERVED_HEADERS = 16,
+};
+
+/* Header names compare case-insensitively (HTTP field names are case-insensitive). */
+bool aws_http_header_name_eq(struct aws_byte_cursor name_a, struct aws_byte_cursor name_b) {
+    return aws_byte_cursor_eq_ignore_case(&name_a, &name_b);
+}
+
+/**
+ * -- Data Structure Notes --
+ * Headers are stored in a linear array, rather than a hash-table of arrays.
+ * The linear array was simpler to implement and may be faster due to having fewer allocations.
+ * The API has been designed so we can swap out the implementation later if desired.
+ *
+ * -- String Storage Notes --
+ * We use a single allocation to hold the name and value of each aws_http_header.
+ * We could optimize storage by using something like a string pool. If we do this, be sure to maintain
+ * the address of existing strings when adding new strings (a dynamic aws_byte_buf would not suffice).
+ */
+struct aws_http_headers {
+    struct aws_allocator *alloc;
+    struct aws_array_list array_list; /* Contains aws_http_header */
+    struct aws_atomic_var refcount;   /* Shared ownership; object freed when this drops to zero */
+};
+
+/* Create an empty header collection with a refcount of 1.
+ * Returns NULL on allocation failure. Release with aws_http_headers_release(). */
+struct aws_http_headers *aws_http_headers_new(struct aws_allocator *allocator) {
+    AWS_PRECONDITION(allocator);
+
+    struct aws_http_headers *headers = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_headers));
+    if (!headers) {
+        return NULL;
+    }
+
+    headers->alloc = allocator;
+    aws_atomic_init_int(&headers->refcount, 1);
+
+    /* Back the collection with a growable array list of aws_http_header entries. */
+    if (aws_array_list_init_dynamic(
+            &headers->array_list, allocator, AWS_HTTP_REQUEST_NUM_RESERVED_HEADERS, sizeof(struct aws_http_header))) {
+        aws_mem_release(headers->alloc, headers);
+        return NULL;
+    }
+
+    return headers;
+}
+
+/* Drop one reference; the caller that takes the count to zero frees everything.
+ * NULL-tolerant. */
+void aws_http_headers_release(struct aws_http_headers *headers) {
+    AWS_PRECONDITION(!headers || headers->alloc);
+    if (!headers) {
+        return;
+    }
+
+    const size_t old_count = aws_atomic_fetch_sub(&headers->refcount, 1);
+    if (old_count != 1) {
+        AWS_ASSERT(old_count != 0); /* releasing an already-destroyed object */
+        return;
+    }
+
+    /* Last reference: free every header's string storage, then the container. */
+    aws_http_headers_clear(headers);
+    aws_array_list_clean_up(&headers->array_list);
+    aws_mem_release(headers->alloc, headers);
+}
+
+/* Take an additional reference; pair each call with aws_http_headers_release(). */
+void aws_http_headers_acquire(struct aws_http_headers *headers) {
+    AWS_PRECONDITION(headers);
+    aws_atomic_fetch_add(&headers->refcount, 1);
+}
+
+/* Copy `header_orig` into the collection: at the front when `front` is true,
+ * appended otherwise. The name and value bytes are copied into a single
+ * allocation owned by `headers` (anchored at name.ptr, freed by
+ * s_http_headers_erase_index / aws_http_headers_clear). */
+static int s_http_headers_add_header_impl(
+    struct aws_http_headers *headers,
+    const struct aws_http_header *header_orig,
+    bool front) {
+
+    AWS_PRECONDITION(headers);
+    AWS_PRECONDITION(header_orig);
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&header_orig->name) && aws_byte_cursor_is_valid(&header_orig->value));
+
+    struct aws_http_header header_copy = *header_orig;
+
+    if (header_copy.name.len == 0) {
+        return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_NAME);
+    }
+
+    /* Whitespace around header values is ignored (RFC-7230 - Section 3.2).
+     * Trim it off here, so anyone querying this value has an easier time. */
+    header_copy.value = aws_strutil_trim_http_whitespace(header_copy.value);
+
+    /* Guard against size_t overflow before sizing the combined allocation. */
+    size_t total_len;
+    if (aws_add_size_checked(header_copy.name.len, header_copy.value.len, &total_len)) {
+        return AWS_OP_ERR;
+    }
+
+    /* Store our own copy of the strings.
+     * We put the name and value into the same allocation. */
+    /* NOTE(review): result of aws_mem_acquire is used unchecked below;
+     * presumably the allocator aborts on failure rather than returning NULL -- confirm. */
+    uint8_t *strmem = aws_mem_acquire(headers->alloc, total_len);
+
+    /* Appending also updates the cursors to point into the new storage. */
+    struct aws_byte_buf strbuf = aws_byte_buf_from_empty_array(strmem, total_len);
+    aws_byte_buf_append_and_update(&strbuf, &header_copy.name);
+    aws_byte_buf_append_and_update(&strbuf, &header_copy.value);
+    if (front) {
+        if (aws_array_list_push_front(&headers->array_list, &header_copy)) {
+            goto error;
+        }
+    } else {
+        if (aws_array_list_push_back(&headers->array_list, &header_copy)) {
+            goto error;
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+
+error:
+    /* The list rejected the entry; free the string storage we just made. */
+    aws_mem_release(headers->alloc, strmem);
+    return AWS_OP_ERR;
+}
+
+int aws_http_headers_add_header(struct aws_http_headers *headers, const struct aws_http_header *header) {
+    /* Add pseudo headers to the front and not checking any violation until we send the header to the wire */
+    bool pseudo = aws_strutil_is_http_pseudo_header_name(header->name);
+    bool front = false;
+    if (pseudo && aws_http_headers_count(headers)) {
+        struct aws_http_header last_header;
+        /* TODO: instead if checking the last header, maybe we can add the pseudo headers to the end of the existing
+         * pseudo headers, which needs to insert to the middle of the array list. */
+        AWS_ZERO_STRUCT(last_header);
+        aws_http_headers_get_index(headers, aws_http_headers_count(headers) - 1, &last_header);
+        /* Only push to the front when the list doesn't already end in a pseudo header,
+         * so a run of pseudo-header adds keeps the collection's relative order sane. */
+        front = !aws_strutil_is_http_pseudo_header_name(last_header.name);
+    }
+    return s_http_headers_add_header_impl(headers, header, front);
+}
+
+/* Convenience wrapper: package the cursors into a header struct and
+ * defer to the header-based add API. */
+int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value) {
+    struct aws_http_header header_to_add;
+    AWS_ZERO_STRUCT(header_to_add);
+    header_to_add.name = name;
+    header_to_add.value = value;
+    return aws_http_headers_add_header(headers, &header_to_add);
+}
+
+/* Remove every header, freeing each entry's string storage. */
+void aws_http_headers_clear(struct aws_http_headers *headers) {
+    AWS_PRECONDITION(headers);
+
+    /* Name & value live in one allocation anchored at name.ptr; free each, then
+     * drop all entries at once. */
+    const size_t num_headers = aws_http_headers_count(headers);
+    for (size_t idx = 0; idx < num_headers; ++idx) {
+        struct aws_http_header *entry = NULL;
+        aws_array_list_get_at_ptr(&headers->array_list, (void **)&entry, idx);
+        AWS_ASSUME(entry);
+        aws_mem_release(headers->alloc, entry->name.ptr);
+    }
+
+    aws_array_list_clear(&headers->array_list);
+}
+
+/* Does not check index */
+/* Free the entry's string storage and remove it from the list.
+ * Caller must have validated `index` already. */
+static void s_http_headers_erase_index(struct aws_http_headers *headers, size_t index) {
+    struct aws_http_header *header = NULL;
+    aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, index);
+    AWS_ASSUME(header);
+
+    /* Storage for name & value is in the same allocation */
+    aws_mem_release(headers->alloc, header->name.ptr);
+
+    aws_array_list_erase(&headers->array_list, index);
+}
+
+/* Remove the header at `index`; raises AWS_ERROR_INVALID_INDEX when out of range. */
+int aws_http_headers_erase_index(struct aws_http_headers *headers, size_t index) {
+    AWS_PRECONDITION(headers);
+
+    /* Validate here; the internal helper performs no bounds check. */
+    const size_t num_headers = aws_http_headers_count(headers);
+    if (index >= num_headers) {
+        return aws_raise_error(AWS_ERROR_INVALID_INDEX);
+    }
+
+    s_http_headers_erase_index(headers, index);
+    return AWS_OP_SUCCESS;
+}
+
+/* Erase entries with name, stop at end_index */
+/* Removes every entry in [start_index, end_index) whose name matches `name`
+ * (case-insensitive). Raises AWS_ERROR_HTTP_HEADER_NOT_FOUND if none matched. */
+static int s_http_headers_erase(
+    struct aws_http_headers *headers,
+    struct aws_byte_cursor name,
+    size_t start_index,
+    size_t end_index) {
+    bool erased_any = false;
+    struct aws_http_header *header = NULL;
+
+    /* Iterating in reverse is simpler */
+    /* (erasing an entry shifts later entries down; walking backwards keeps
+     * the not-yet-visited indices stable) */
+    for (size_t n = end_index; n > start_index; --n) {
+        const size_t i = n - 1;
+
+        aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, i);
+        AWS_ASSUME(header);
+
+        if (aws_http_header_name_eq(header->name, name)) {
+            s_http_headers_erase_index(headers, i);
+            erased_any = true;
+        }
+    }
+
+    if (!erased_any) {
+        return aws_raise_error(AWS_ERROR_HTTP_HEADER_NOT_FOUND);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Remove ALL headers whose name matches (case-insensitive).
+ * Raises AWS_ERROR_HTTP_HEADER_NOT_FOUND when none matched. */
+int aws_http_headers_erase(struct aws_http_headers *headers, struct aws_byte_cursor name) {
+    AWS_PRECONDITION(headers);
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&name));
+
+    return s_http_headers_erase(headers, name, 0, aws_http_headers_count(headers));
+}
+
+/* Remove the FIRST header matching both name (case-insensitive) and value
+ * (exact). Raises AWS_ERROR_HTTP_HEADER_NOT_FOUND when no pair matches. */
+int aws_http_headers_erase_value(
+    struct aws_http_headers *headers,
+    struct aws_byte_cursor name,
+    struct aws_byte_cursor value) {
+
+    AWS_PRECONDITION(headers);
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&name) && aws_byte_cursor_is_valid(&value));
+
+    const size_t num_headers = aws_http_headers_count(headers);
+    for (size_t idx = 0; idx < num_headers; ++idx) {
+        struct aws_http_header *candidate = NULL;
+        aws_array_list_get_at_ptr(&headers->array_list, (void **)&candidate, idx);
+        AWS_ASSUME(candidate);
+
+        if (!aws_http_header_name_eq(candidate->name, name)) {
+            continue;
+        }
+        if (!aws_byte_cursor_eq(&candidate->value, &value)) {
+            continue;
+        }
+        s_http_headers_erase_index(headers, idx);
+        return AWS_OP_SUCCESS;
+    }
+
+    return aws_raise_error(AWS_ERROR_HTTP_HEADER_NOT_FOUND);
+}
+
+/* Add `count` headers from `array`. All-or-nothing: on failure any headers
+ * added by this call are rolled back before returning AWS_OP_ERR. */
+int aws_http_headers_add_array(struct aws_http_headers *headers, const struct aws_http_header *array, size_t count) {
+    AWS_PRECONDITION(headers);
+    AWS_PRECONDITION(AWS_MEM_IS_READABLE(array, count));
+
+    /* Remember the size before we start so we can undo partial progress. */
+    const size_t orig_count = aws_http_headers_count(headers);
+
+    for (size_t i = 0; i < count; ++i) {
+        if (aws_http_headers_add_header(headers, &array[i])) {
+            goto error;
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+
+error:
+    /* Erase headers from the end until we're back to our previous state */
+    for (size_t new_count = aws_http_headers_count(headers); new_count > orig_count; --new_count) {
+        s_http_headers_erase_index(headers, new_count - 1);
+    }
+
+    return AWS_OP_ERR;
+}
+
+/* Set a header to a single value: add the new entry, then erase any older
+ * entries with the same name. Pseudo headers go to the front (index 0), so
+ * the erase scan for them starts at 1 to skip the entry just added. */
+int aws_http_headers_set(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value) {
+    AWS_PRECONDITION(headers);
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&name) && aws_byte_cursor_is_valid(&value));
+
+    const size_t prev_count = aws_http_headers_count(headers);
+    bool pseudo = aws_strutil_is_http_pseudo_header_name(name);
+    const size_t start = pseudo ? 1 : 0;
+    struct aws_http_header header = {.name = name, .value = value};
+    if (s_http_headers_add_header_impl(headers, &header, pseudo)) {
+        return AWS_OP_ERR;
+    }
+    /* Erase pre-existing headers AFTER add, in case name or value was referencing their memory. */
+    s_http_headers_erase(headers, name, start, prev_count);
+    return AWS_OP_SUCCESS;
+}
+
+/* Number of header entries currently stored. */
+size_t aws_http_headers_count(const struct aws_http_headers *headers) {
+    AWS_PRECONDITION(headers);
+
+    return aws_array_list_length(&headers->array_list);
+}
+
+/* Copy the header at `index` into *out_header (cursors point into storage
+ * owned by `headers`). aws_array_list_get_at raises on an invalid index. */
+int aws_http_headers_get_index(
+    const struct aws_http_headers *headers,
+    size_t index,
+    struct aws_http_header *out_header) {
+
+    AWS_PRECONDITION(headers);
+    AWS_PRECONDITION(out_header);
+
+    return aws_array_list_get_at(&headers->array_list, out_header, index);
+}
+
+/* RFC-9110 - 5.3
+ * A recipient MAY combine multiple field lines within a field section that
+ * have the same field name into one field line, without changing the semantics
+ * of the message, by appending each subsequent field line value to the initial
+ * field line value in order, separated by a comma (",") and optional whitespace
+ * (OWS, defined in Section 5.6.3). For consistency, use comma SP. */
+/* Returns a newly allocated string (caller frees) joining every value whose
+ * name matches, or NULL with AWS_ERROR_HTTP_HEADER_NOT_FOUND raised when no
+ * header matched. NOTE(review): aws_byte_buf_init/append_dynamic results are
+ * not checked here; presumably the allocator aborts on failure -- confirm. */
+AWS_HTTP_API
+struct aws_string *aws_http_headers_get_all(const struct aws_http_headers *headers, struct aws_byte_cursor name) {
+
+    AWS_PRECONDITION(headers);
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&name));
+
+    struct aws_string *value_str = NULL;
+
+    const struct aws_byte_cursor separator = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(", ");
+
+    struct aws_byte_buf value_builder;
+    aws_byte_buf_init(&value_builder, headers->alloc, 0);
+    bool found = false;
+    struct aws_http_header *header = NULL;
+    const size_t count = aws_http_headers_count(headers);
+    for (size_t i = 0; i < count; ++i) {
+        aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, i);
+        if (aws_http_header_name_eq(name, header->name)) {
+            /* Separator goes BETWEEN values, so skip it before the first match. */
+            if (!found) {
+                found = true;
+            } else {
+                aws_byte_buf_append_dynamic(&value_builder, &separator);
+            }
+            aws_byte_buf_append_dynamic(&value_builder, &header->value);
+        }
+    }
+
+    if (found) {
+        value_str = aws_string_new_from_buf(headers->alloc, &value_builder);
+    } else {
+        aws_raise_error(AWS_ERROR_HTTP_HEADER_NOT_FOUND);
+    }
+
+    aws_byte_buf_clean_up(&value_builder);
+    return value_str;
+}
+
+/* Look up the FIRST header whose name matches (case-insensitive) and copy its
+ * value cursor into *out_value (points into storage owned by `headers`).
+ * Raises AWS_ERROR_HTTP_HEADER_NOT_FOUND when absent. */
+int aws_http_headers_get(
+    const struct aws_http_headers *headers,
+    struct aws_byte_cursor name,
+    struct aws_byte_cursor *out_value) {
+
+    AWS_PRECONDITION(headers);
+    AWS_PRECONDITION(out_value);
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&name));
+
+    const size_t num_headers = aws_http_headers_count(headers);
+    for (size_t idx = 0; idx < num_headers; ++idx) {
+        struct aws_http_header *entry = NULL;
+        aws_array_list_get_at_ptr(&headers->array_list, (void **)&entry, idx);
+        AWS_ASSUME(entry);
+
+        if (aws_http_header_name_eq(entry->name, name)) {
+            *out_value = entry->value;
+            return AWS_OP_SUCCESS;
+        }
+    }
+
+    return aws_raise_error(AWS_ERROR_HTTP_HEADER_NOT_FOUND);
+}
+
+/* True when at least one header with this name exists (case-insensitive).
+ * Piggybacks on the lookup; the value itself is discarded. */
+bool aws_http_headers_has(const struct aws_http_headers *headers, struct aws_byte_cursor name) {
+
+    struct aws_byte_cursor ignored_value;
+    return aws_http_headers_get(headers, name, &ignored_value) == AWS_OP_SUCCESS;
+}
+
+/* --- HTTP/2 pseudo-header getters ---
+ * Thin wrappers over aws_http_headers_get() keyed by the corresponding
+ * pseudo-header name constant; each raises AWS_ERROR_HTTP_HEADER_NOT_FOUND
+ * when the pseudo-header is absent. */
+
+int aws_http2_headers_get_request_method(
+    const struct aws_http_headers *h2_headers,
+    struct aws_byte_cursor *out_method) {
+    return aws_http_headers_get(h2_headers, aws_http_header_method, out_method);
+}
+
+int aws_http2_headers_get_request_scheme(
+    const struct aws_http_headers *h2_headers,
+    struct aws_byte_cursor *out_scheme) {
+    return aws_http_headers_get(h2_headers, aws_http_header_scheme, out_scheme);
+}
+
+int aws_http2_headers_get_request_authority(
+    const struct aws_http_headers *h2_headers,
+    struct aws_byte_cursor *out_authority) {
+    return aws_http_headers_get(h2_headers, aws_http_header_authority, out_authority);
+}
+
+int aws_http2_headers_get_request_path(const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_path) {
+    return aws_http_headers_get(h2_headers, aws_http_header_path, out_path);
+}
+
+/* Read the :status pseudo-header and parse it as a decimal integer.
+ * Fails if the pseudo-header is missing or its value is not a valid number. */
+int aws_http2_headers_get_response_status(const struct aws_http_headers *h2_headers, int *out_status_code) {
+    struct aws_byte_cursor status_code_cur;
+    int return_code = aws_http_headers_get(h2_headers, aws_http_header_status, &status_code_cur);
+    if (return_code == AWS_OP_SUCCESS) {
+        uint64_t code_val_u64;
+        if (aws_byte_cursor_utf8_parse_u64(status_code_cur, &code_val_u64)) {
+            return AWS_OP_ERR;
+        }
+        *out_status_code = (int)code_val_u64;
+    }
+    return return_code;
+}
+
+/* --- HTTP/2 pseudo-header setters ---
+ * Thin wrappers over aws_http_headers_set() keyed by the corresponding
+ * pseudo-header name constant; any previous value is replaced. */
+
+int aws_http2_headers_set_request_method(struct aws_http_headers *h2_headers, struct aws_byte_cursor method) {
+    return aws_http_headers_set(h2_headers, aws_http_header_method, method);
+}
+
+int aws_http2_headers_set_request_scheme(struct aws_http_headers *h2_headers, struct aws_byte_cursor scheme) {
+    return aws_http_headers_set(h2_headers, aws_http_header_scheme, scheme);
+}
+
+int aws_http2_headers_set_request_authority(struct aws_http_headers *h2_headers, struct aws_byte_cursor authority) {
+    return aws_http_headers_set(h2_headers, aws_http_header_authority, authority);
+}
+
+int aws_http2_headers_set_request_path(struct aws_http_headers *h2_headers, struct aws_byte_cursor path) {
+    return aws_http_headers_set(h2_headers, aws_http_header_path, path);
+}
+
+/* Store `status_code` as the :status pseudo-header, rendered as exactly
+ * three zero-padded decimal digits (e.g. 7 -> "007"). */
+int aws_http2_headers_set_response_status(struct aws_http_headers *h2_headers, int status_code) {
+    /* Only codes that render as three digits are representable. */
+    if (status_code < 0 || status_code > 999) {
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+    char digits[4];
+    snprintf(digits, sizeof(digits), "%03d", status_code);
+    return aws_http_headers_set(h2_headers, aws_http_header_status, aws_byte_cursor_from_c_str(digits));
+}
+
+struct aws_http_message {
+    struct aws_allocator *allocator;
+    struct aws_http_headers *headers;     /* Owned reference (acquired/released with the message) */
+    struct aws_input_stream *body_stream; /* Optional; owned reference when non-NULL */
+    struct aws_atomic_var refcount;       /* Shared ownership; freed when this drops to zero */
+    enum aws_http_version http_version;
+
+    /* Data specific to the request or response subclasses */
+    union {
+        struct aws_http_message_request_data {
+            struct aws_string *method;
+            struct aws_string *path;
+        } request;
+        struct aws_http_message_response_data {
+            int status;
+        } response;
+    } subclass_data;
+
+    /* Exactly one of these points into subclass_data; the other stays NULL.
+     * They double as the is_request/is_response discriminator. */
+    struct aws_http_message_request_data *request_data;
+    struct aws_http_message_response_data *response_data;
+};
+
+/* Replace *dst with a copy of `cursor` (NULL when the cursor is empty),
+ * freeing the previous string. On allocation failure *dst is left untouched. */
+static int s_set_string_from_cursor(
+    struct aws_string **dst,
+    struct aws_byte_cursor cursor,
+    struct aws_allocator *alloc) {
+
+    AWS_PRECONDITION(dst);
+
+    /* An empty cursor clears the destination. */
+    struct aws_string *replacement = NULL;
+    if (cursor.len) {
+        replacement = aws_string_new_from_cursor(alloc, &cursor);
+        if (!replacement) {
+            return AWS_OP_ERR;
+        }
+    }
+
+    /* Free whatever was there before (aws_string_destroy tolerates NULL). */
+    aws_string_destroy(*dst);
+    *dst = replacement;
+    return AWS_OP_SUCCESS;
+}
+/* Allocate the message base: refcount of 1, plus either a new header
+ * collection or an acquired reference on `existing_headers`. Returns NULL on
+ * failure. Subclass fields are filled in by the request/response wrappers. */
+static struct aws_http_message *s_message_new_common(
+    struct aws_allocator *allocator,
+    struct aws_http_headers *existing_headers) {
+
+    /* allocation cannot fail */
+    struct aws_http_message *message = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_message));
+
+    message->allocator = allocator;
+    aws_atomic_init_int(&message->refcount, 1);
+
+    if (existing_headers) {
+        /* Share the caller's collection; released again in aws_http_message_release(). */
+        message->headers = existing_headers;
+        aws_http_headers_acquire(message->headers);
+    } else {
+        message->headers = aws_http_headers_new(allocator);
+        if (!message->headers) {
+            goto error;
+        }
+    }
+
+    return message;
+error:
+    /* Release handles the partially-constructed message (headers may be NULL). */
+    aws_http_message_destroy(message);
+    return NULL;
+}
+
+/* Build a request message: base construction plus request-subclass setup. */
+static struct aws_http_message *s_message_new_request_common(
+    struct aws_allocator *allocator,
+    struct aws_http_headers *existing_headers,
+    enum aws_http_version version) {
+
+    struct aws_http_message *message = s_message_new_common(allocator, existing_headers);
+    if (!message) {
+        return NULL;
+    }
+
+    /* Mark the message as a request and record the protocol version. */
+    message->request_data = &message->subclass_data.request;
+    message->http_version = version;
+    return message;
+}
+
+/* Create an HTTP/1.1 request that shares (acquires) the caller's existing
+ * header collection instead of creating a fresh one. */
+struct aws_http_message *aws_http_message_new_request_with_headers(
+    struct aws_allocator *allocator,
+    struct aws_http_headers *existing_headers) {
+
+    AWS_PRECONDITION(allocator);
+    AWS_PRECONDITION(existing_headers);
+
+    return s_message_new_request_common(allocator, existing_headers, AWS_HTTP_VERSION_1_1);
+}
+
+/* Create an empty HTTP/1.1 request message (refcount 1). */
+struct aws_http_message *aws_http_message_new_request(struct aws_allocator *allocator) {
+    AWS_PRECONDITION(allocator);
+    return s_message_new_request_common(allocator, NULL, AWS_HTTP_VERSION_1_1);
+}
+
+/* Create an empty HTTP/2 request message (refcount 1). */
+struct aws_http_message *aws_http2_message_new_request(struct aws_allocator *allocator) {
+    AWS_PRECONDITION(allocator);
+    return s_message_new_request_common(allocator, NULL, AWS_HTTP_VERSION_2);
+}
+
+/* Build a response message: base construction plus response-subclass setup. */
+static struct aws_http_message *s_http_message_new_response_common(
+    struct aws_allocator *allocator,
+    enum aws_http_version version) {
+    AWS_PRECONDITION(allocator);
+
+    struct aws_http_message *message = s_message_new_common(allocator, NULL);
+    if (!message) {
+        return NULL;
+    }
+
+    /* Mark the message as a response; the status starts out unknown. */
+    message->response_data = &message->subclass_data.response;
+    message->response_data->status = AWS_HTTP_STATUS_CODE_UNKNOWN;
+    message->http_version = version;
+    return message;
+}
+
+/* Create an empty HTTP/1.1 response message (refcount 1). */
+struct aws_http_message *aws_http_message_new_response(struct aws_allocator *allocator) {
+    AWS_PRECONDITION(allocator);
+    return s_http_message_new_response_common(allocator, AWS_HTTP_VERSION_1_1);
+}
+
+/* Create an empty HTTP/2 response message (refcount 1). */
+struct aws_http_message *aws_http2_message_new_response(struct aws_allocator *allocator) {
+    AWS_PRECONDITION(allocator);
+    return s_http_message_new_response_common(allocator, AWS_HTTP_VERSION_2);
+}
+
+/* Legacy name; identical to aws_http_message_release(). */
+void aws_http_message_destroy(struct aws_http_message *message) {
+    aws_http_message_release(message);
+}
+
+/* Drop one reference; the caller taking the count to zero frees the message
+ * and releases the resources it owns. NULL-tolerant; always returns NULL so
+ * callers can write `msg = aws_http_message_release(msg)`. */
+struct aws_http_message *aws_http_message_release(struct aws_http_message *message) {
+    /* Note that release() may also be used by new() functions to clean up if something goes wrong */
+    AWS_PRECONDITION(!message || message->allocator);
+    if (!message) {
+        return NULL;
+    }
+
+    size_t prev_refcount = aws_atomic_fetch_sub(&message->refcount, 1);
+    if (prev_refcount == 1) {
+        /* Request-subclass strings only exist when request_data is set. */
+        if (message->request_data) {
+            aws_string_destroy(message->request_data->method);
+            aws_string_destroy(message->request_data->path);
+        }
+
+        /* headers/body_stream release functions tolerate NULL (partial init). */
+        aws_http_headers_release(message->headers);
+        aws_input_stream_release(message->body_stream);
+        aws_mem_release(message->allocator, message);
+    } else {
+        AWS_ASSERT(prev_refcount != 0);
+    }
+
+    return NULL;
+}
+
+/* Take an additional reference; NULL-tolerant. Returns `message` unchanged. */
+struct aws_http_message *aws_http_message_acquire(struct aws_http_message *message) {
+    if (message == NULL) {
+        return NULL;
+    }
+    aws_atomic_fetch_add(&message->refcount, 1);
+    return message;
+}
+
+/* The request_data/response_data pointers double as the subclass discriminator. */
+bool aws_http_message_is_request(const struct aws_http_message *message) {
+    AWS_PRECONDITION(message);
+    return message->request_data;
+}
+
+bool aws_http_message_is_response(const struct aws_http_message *message) {
+    AWS_PRECONDITION(message);
+    return message->response_data;
+}
+
+/* Protocol version recorded at construction (1.1 or 2). */
+enum aws_http_version aws_http_message_get_protocol_version(const struct aws_http_message *message) {
+    AWS_PRECONDITION(message);
+    return message->http_version;
+}
+
+/* Set the request method. HTTP/1.1 stores it as subclass string data;
+ * HTTP/2 stores it as the :method pseudo-header. Fails with
+ * AWS_ERROR_INVALID_STATE when called on a non-request message. */
+int aws_http_message_set_request_method(struct aws_http_message *request_message, struct aws_byte_cursor method) {
+    AWS_PRECONDITION(request_message);
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&method));
+    AWS_PRECONDITION(request_message->request_data);
+
+    if (request_message->request_data) {
+        switch (request_message->http_version) {
+            case AWS_HTTP_VERSION_1_1:
+                return s_set_string_from_cursor(
+                    &request_message->request_data->method, method, request_message->allocator);
+            case AWS_HTTP_VERSION_2:
+                return aws_http2_headers_set_request_method(request_message->headers, method);
+            default:
+                return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+        }
+    }
+    return aws_raise_error(AWS_ERROR_INVALID_STATE);
+}
+
+/* Read the request method: HTTP/1.1 from subclass string data, HTTP/2 from
+ * the :method pseudo-header. On any failure path *out_method is zeroed and
+ * the tracked error code is raised. */
+int aws_http_message_get_request_method(
+    const struct aws_http_message *request_message,
+    struct aws_byte_cursor *out_method) {
+
+    AWS_PRECONDITION(request_message);
+    AWS_PRECONDITION(out_method);
+    AWS_PRECONDITION(request_message->request_data);
+    int error = AWS_ERROR_HTTP_DATA_NOT_AVAILABLE;
+    if (request_message->request_data) {
+        switch (request_message->http_version) {
+            case AWS_HTTP_VERSION_1_1:
+                if (request_message->request_data->method) {
+                    *out_method = aws_byte_cursor_from_string(request_message->request_data->method);
+                    return AWS_OP_SUCCESS;
+                }
+                break;
+            case AWS_HTTP_VERSION_2:
+                return aws_http2_headers_get_request_method(request_message->headers, out_method);
+            default:
+                error = AWS_ERROR_UNIMPLEMENTED;
+        }
+    }
+
+    AWS_ZERO_STRUCT(*out_method);
+    return aws_raise_error(error);
+}
+
+/* Set the request path. HTTP/1.1 stores it as subclass string data;
+ * HTTP/2 stores it as the :path pseudo-header. Fails with
+ * AWS_ERROR_INVALID_STATE when called on a non-request message. */
+int aws_http_message_set_request_path(struct aws_http_message *request_message, struct aws_byte_cursor path) {
+    AWS_PRECONDITION(request_message);
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&path));
+    AWS_PRECONDITION(request_message->request_data);
+
+    if (request_message->request_data) {
+        switch (request_message->http_version) {
+            case AWS_HTTP_VERSION_1_1:
+                return s_set_string_from_cursor(&request_message->request_data->path, path, request_message->allocator);
+            case AWS_HTTP_VERSION_2:
+                return aws_http2_headers_set_request_path(request_message->headers, path);
+            default:
+                return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+        }
+    }
+
+    return aws_raise_error(AWS_ERROR_INVALID_STATE);
+}
+
+/* Read the request path: HTTP/1.1 from subclass string data, HTTP/2 from the
+ * :path pseudo-header. Mirrors aws_http_message_get_request_method(): the
+ * error code is tracked and *out_path is zeroed on EVERY failure path
+ * (previously the unimplemented-version branch returned with *out_path left
+ * untouched, unlike the sibling getter). */
+int aws_http_message_get_request_path(
+    const struct aws_http_message *request_message,
+    struct aws_byte_cursor *out_path) {
+
+    AWS_PRECONDITION(request_message);
+    AWS_PRECONDITION(out_path);
+    AWS_PRECONDITION(request_message->request_data);
+
+    int error = AWS_ERROR_HTTP_DATA_NOT_AVAILABLE;
+    if (request_message->request_data) {
+        switch (request_message->http_version) {
+            case AWS_HTTP_VERSION_1_1:
+                if (request_message->request_data->path) {
+                    *out_path = aws_byte_cursor_from_string(request_message->request_data->path);
+                    return AWS_OP_SUCCESS;
+                }
+                break;
+            case AWS_HTTP_VERSION_2:
+                return aws_http2_headers_get_request_path(request_message->headers, out_path);
+            default:
+                error = AWS_ERROR_UNIMPLEMENTED;
+        }
+    }
+
+    AWS_ZERO_STRUCT(*out_path);
+    return aws_raise_error(error);
+}
+
+/* Read the response status: HTTP/1.1 from subclass data, HTTP/2 parsed from
+ * the :status pseudo-header. *out_status_code is pre-set to UNKNOWN so it is
+ * defined even on the failure paths. */
+int aws_http_message_get_response_status(const struct aws_http_message *response_message, int *out_status_code) {
+    AWS_PRECONDITION(response_message);
+    AWS_PRECONDITION(out_status_code);
+    AWS_PRECONDITION(response_message->response_data);
+
+    *out_status_code = AWS_HTTP_STATUS_CODE_UNKNOWN;
+
+    if (response_message->response_data) {
+        switch (response_message->http_version) {
+            case AWS_HTTP_VERSION_1_1:
+                if (response_message->response_data->status != AWS_HTTP_STATUS_CODE_UNKNOWN) {
+                    *out_status_code = response_message->response_data->status;
+                    return AWS_OP_SUCCESS;
+                }
+                break;
+            case AWS_HTTP_VERSION_2:
+                return aws_http2_headers_get_response_status(response_message->headers, out_status_code);
+            default:
+                return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+        }
+    }
+
+    return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE);
+}
+
+/* Set the response status. HTTP/1.1 stores it in subclass data; HTTP/2
+ * renders it into the :status pseudo-header. Codes outside [0, 999] are
+ * rejected with AWS_ERROR_HTTP_INVALID_STATUS_CODE; calling on a non-response
+ * message fails with AWS_ERROR_INVALID_STATE. */
+int aws_http_message_set_response_status(struct aws_http_message *response_message, int status_code) {
+    AWS_PRECONDITION(response_message);
+    AWS_PRECONDITION(response_message->response_data);
+
+    if (response_message->response_data) {
+        /* Status code must be printable with exactly 3 digits */
+        if (status_code >= 0 && status_code <= 999) {
+            switch (response_message->http_version) {
+                case AWS_HTTP_VERSION_1_1:
+                    response_message->response_data->status = status_code;
+                    return AWS_OP_SUCCESS;
+                case AWS_HTTP_VERSION_2:
+                    return aws_http2_headers_set_response_status(response_message->headers, status_code);
+                default:
+                    return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+            }
+        }
+
+        return aws_raise_error(AWS_ERROR_HTTP_INVALID_STATUS_CODE);
+    }
+
+    return aws_raise_error(AWS_ERROR_INVALID_STATE);
+}
+
+/* Install `body_stream` as the message body (NULL clears it), releasing any
+ * previously installed stream. The message holds a reference on the stream
+ * until it is replaced or the message is released. */
+void aws_http_message_set_body_stream(struct aws_http_message *message, struct aws_input_stream *body_stream) {
+    AWS_PRECONDITION(message);
+
+    /* Acquire the incoming stream BEFORE releasing the previous one: if the
+     * caller re-installs the stream that is already set (and the message holds
+     * the last reference), release-first would destroy it and then acquire a
+     * dangling pointer. */
+    if (body_stream) {
+        aws_input_stream_acquire(body_stream);
+    }
+    aws_input_stream_release(message->body_stream);
+    message->body_stream = body_stream;
+}
+
+/* Write a chunked-encoding chunk on an HTTP/1 stream. The vtable entry is
+ * only populated for HTTP/1 streams; calling on any other stream raises
+ * AWS_ERROR_INVALID_STATE (logged at TRACE). */
+int aws_http1_stream_write_chunk(struct aws_http_stream *http1_stream, const struct aws_http1_chunk_options *options) {
+    AWS_PRECONDITION(http1_stream);
+    AWS_PRECONDITION(http1_stream->vtable);
+    AWS_PRECONDITION(options);
+    if (!http1_stream->vtable->http1_write_chunk) {
+        AWS_LOGF_TRACE(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: HTTP/1 stream only function invoked on other stream, ignoring call.",
+            (void *)http1_stream);
+        return aws_raise_error(AWS_ERROR_INVALID_STATE);
+    }
+
+    return http1_stream->vtable->http1_write_chunk(http1_stream, options);
+}
+
+/* Write DATA on an HTTP/2 stream. Unlike the HTTP/1 wrappers, a missing
+ * vtable entry here is a precondition violation rather than a runtime error. */
+int aws_http2_stream_write_data(
+    struct aws_http_stream *http2_stream,
+    const struct aws_http2_stream_write_data_options *options) {
+    AWS_PRECONDITION(http2_stream);
+    AWS_PRECONDITION(http2_stream->vtable);
+    AWS_PRECONDITION(http2_stream->vtable->http2_write_data);
+    AWS_PRECONDITION(options);
+
+    return http2_stream->vtable->http2_write_data(http2_stream, options);
+}
+
+/* Attach trailing headers to a chunked HTTP/1 stream. The vtable entry is
+ * only populated for HTTP/1 streams; calling on any other stream raises
+ * AWS_ERROR_INVALID_STATE (logged at TRACE). */
+int aws_http1_stream_add_chunked_trailer(
+    struct aws_http_stream *http1_stream,
+    const struct aws_http_headers *trailing_headers) {
+    AWS_PRECONDITION(http1_stream);
+    AWS_PRECONDITION(http1_stream->vtable);
+    AWS_PRECONDITION(trailing_headers);
+    if (!http1_stream->vtable->http1_add_trailer) {
+        AWS_LOGF_TRACE(
+            AWS_LS_HTTP_STREAM,
+            "id=%p: HTTP/1 stream only function invoked on other stream, ignoring call.",
+            (void *)http1_stream);
+        return aws_raise_error(AWS_ERROR_INVALID_STATE);
+    }
+
+    return http1_stream->vtable->http1_add_trailer(http1_stream, trailing_headers);
+}
+
+/* --- Trivial accessors / delegators -------------------------------------- */
+
+/* Borrowed pointer; NULL when no body stream is set. */
+struct aws_input_stream *aws_http_message_get_body_stream(const struct aws_http_message *message) {
+    AWS_PRECONDITION(message);
+    return message->body_stream;
+}
+
+/* Borrowed pointer to the message's header collection. */
+struct aws_http_headers *aws_http_message_get_headers(const struct aws_http_message *message) {
+    AWS_PRECONDITION(message);
+    return message->headers;
+}
+
+/* Read-only variant of the above. */
+const struct aws_http_headers *aws_http_message_get_const_headers(const struct aws_http_message *message) {
+    AWS_PRECONDITION(message);
+    return message->headers;
+}
+
+/* The following simply delegate to the aws_http_headers API on message->headers. */
+int aws_http_message_add_header(struct aws_http_message *message, struct aws_http_header header) {
+    return aws_http_headers_add(message->headers, header.name, header.value);
+}
+
+int aws_http_message_add_header_array(
+    struct aws_http_message *message,
+    const struct aws_http_header *headers,
+    size_t num_headers) {
+
+    return aws_http_headers_add_array(message->headers, headers, num_headers);
+}
+
+int aws_http_message_erase_header(struct aws_http_message *message, size_t index) {
+    return aws_http_headers_erase_index(message->headers, index);
+}
+
+size_t aws_http_message_get_header_count(const struct aws_http_message *message) {
+    return aws_http_headers_count(message->headers);
+}
+
+int aws_http_message_get_header(
+    const struct aws_http_message *message,
+    struct aws_http_header *out_header,
+    size_t index) {
+
+    return aws_http_headers_get_index(message->headers, index, out_header);
+}
+
+/* Create a request stream on a client connection. Returns NULL (with an error
+ * raised) on invalid options or when the connection's make_request fails.
+ * On success the connection holds an extra reference for the stream's lifetime. */
+struct aws_http_stream *aws_http_connection_make_request(
+    struct aws_http_connection *client_connection,
+    const struct aws_http_make_request_options *options) {
+
+    AWS_PRECONDITION(client_connection);
+    AWS_PRECONDITION(aws_http_connection_is_client(client_connection));
+    AWS_PRECONDITION(options);
+    if (options->self_size == 0 || !options->request || !aws_http_message_is_request(options->request)) {
+
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_CONNECTION,
+            "id=%p: Cannot create client request, options are invalid.",
+            (void *)client_connection);
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    /* Connection owns stream, and must outlive stream */
+    aws_http_connection_acquire(client_connection);
+
+    struct aws_http_stream *stream = client_connection->vtable->make_request(client_connection, options);
+    if (!stream) {
+        /* Stream creation failed; give back the reference taken above. */
+        aws_http_connection_release(client_connection);
+        return NULL;
+    }
+
+    return stream;
+}
+
+struct aws_http_message *aws_http2_message_new_from_http1(
+ struct aws_allocator *alloc,
+ const struct aws_http_message *http1_msg) {
+
+ struct aws_http_headers *old_headers = aws_http_message_get_headers(http1_msg);
+ struct aws_http_header header_iter;
+ struct aws_byte_buf lower_name_buf;
+ AWS_ZERO_STRUCT(lower_name_buf);
+ struct aws_http_message *message = aws_http_message_is_request(http1_msg) ? aws_http2_message_new_request(alloc)
+ : aws_http2_message_new_response(alloc);
+ if (!message) {
+ return NULL;
+ }
+ struct aws_http_headers *copied_headers = message->headers;
+ AWS_LOGF_TRACE(AWS_LS_HTTP_GENERAL, "Creating HTTP/2 message from HTTP/1 message id: %p", (void *)http1_msg);
+
+ /* Set pseudo headers from HTTP/1.1 message */
+ if (aws_http_message_is_request(http1_msg)) {
+ struct aws_byte_cursor method;
+ if (aws_http_message_get_request_method(http1_msg, &method)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_GENERAL,
+ "Failed to create HTTP/2 message from HTTP/1 message, ip: %p, due to no method found.",
+ (void *)http1_msg);
+ /* error will happen when the request is invalid */
+ aws_raise_error(AWS_ERROR_HTTP_INVALID_METHOD);
+ goto error;
+ }
+ /* Use add instead of set method to avoid push front to the array list */
+ if (aws_http_headers_add(copied_headers, aws_http_header_method, method)) {
+ goto error;
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_GENERAL,
+ "Added header to new HTTP/2 header - \"%.*s\": \"%.*s\" ",
+ (int)aws_http_header_method.len,
+ aws_http_header_method.ptr,
+ (int)method.len,
+ method.ptr);
+ /**
+ * we set a default value, "https", for now.
+ * TODO: as we support prior knowledge, we may also want to support http?
+ */
+ struct aws_byte_cursor scheme_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https");
+ if (aws_http_headers_add(copied_headers, aws_http_header_scheme, scheme_cursor)) {
+ goto error;
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_GENERAL,
+ "Added header to new HTTP/2 header - \"%.*s\": \"%.*s\" ",
+ (int)aws_http_header_scheme.len,
+ aws_http_header_scheme.ptr,
+ (int)scheme_cursor.len,
+ scheme_cursor.ptr);
+
+ /**
+ * An intermediary that forwards a request over HTTP/2 MUST construct an ":authority" pseudo-header field using
+ * the authority information from the control data of the original request. (RFC=9113 8.3.1)
+ */
+ struct aws_byte_cursor host_value;
+ AWS_ZERO_STRUCT(host_value);
+ if (aws_http_headers_get(http1_msg->headers, aws_byte_cursor_from_c_str("host"), &host_value) ==
+ AWS_OP_SUCCESS) {
+ if (aws_http_headers_add(copied_headers, aws_http_header_authority, host_value)) {
+ goto error;
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_GENERAL,
+ "Added header to new HTTP/2 header - \"%.*s\": \"%.*s\" ",
+ (int)aws_http_header_authority.len,
+ aws_http_header_authority.ptr,
+ (int)host_value.len,
+ host_value.ptr);
+ }
+ /* TODO: If the host headers is missing, the target URI could be the other source of the authority information
+ */
+
+ struct aws_byte_cursor path_cursor;
+ if (aws_http_message_get_request_path(http1_msg, &path_cursor)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_GENERAL,
+ "Failed to create HTTP/2 message from HTTP/1 message, ip: %p, due to no path found.",
+ (void *)http1_msg);
+ aws_raise_error(AWS_ERROR_HTTP_INVALID_PATH);
+ goto error;
+ }
+ if (aws_http_headers_add(copied_headers, aws_http_header_path, path_cursor)) {
+ goto error;
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_GENERAL,
+ "Added header to new HTTP/2 header - \"%.*s\": \"%.*s\" ",
+ (int)aws_http_header_path.len,
+ aws_http_header_path.ptr,
+ (int)path_cursor.len,
+ path_cursor.ptr);
+ } else {
+ int status = 0;
+ if (aws_http_message_get_response_status(http1_msg, &status)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_GENERAL,
+ "Failed to create HTTP/2 response message from HTTP/1 response message, ip: %p, due to no status "
+ "found.",
+ (void *)http1_msg);
+ /* error will happen when the request is invalid */
+ aws_raise_error(AWS_ERROR_HTTP_INVALID_STATUS_CODE);
+ goto error;
+ }
+ if (aws_http2_headers_set_response_status(copied_headers, status)) {
+ goto error;
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_GENERAL,
+ "Added header to new HTTP/2 header - \"%.*s\": \"%d\" ",
+ (int)aws_http_header_status.len,
+ aws_http_header_status.ptr,
+ status);
+ }
+
+ if (aws_byte_buf_init(&lower_name_buf, alloc, 256)) {
+ goto error;
+ }
+ for (size_t iter = 0; iter < aws_http_headers_count(old_headers); iter++) {
+ aws_byte_buf_reset(&lower_name_buf, false);
+ bool copy_header = true;
+ /* name should be converted to lower case */
+ if (aws_http_headers_get_index(old_headers, iter, &header_iter)) {
+ goto error;
+ }
+ /* append lower case name to the buffer */
+ aws_byte_buf_append_with_lookup(&lower_name_buf, &header_iter.name, aws_lookup_table_to_lower_get());
+ struct aws_byte_cursor lower_name_cursor = aws_byte_cursor_from_buf(&lower_name_buf);
+ enum aws_http_header_name name_enum = aws_http_lowercase_str_to_header_name(lower_name_cursor);
+ switch (name_enum) {
+ case AWS_HTTP_HEADER_TRANSFER_ENCODING:
+ case AWS_HTTP_HEADER_UPGRADE:
+ case AWS_HTTP_HEADER_KEEP_ALIVE:
+ case AWS_HTTP_HEADER_PROXY_CONNECTION:
+ case AWS_HTTP_HEADER_HOST:
+ /**
+ * An intermediary transforming an HTTP/1.x message to HTTP/2 MUST remove connection-specific header
+ * fields as discussed in Section 7.6.1 of [HTTP]. (RFC=9113 8.2.2)
+ */
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_GENERAL,
+ "Skip connection-specific headers - \"%.*s\" ",
+ (int)lower_name_cursor.len,
+ lower_name_cursor.ptr);
+ copy_header = false;
+ break;
+
+ default:
+ break;
+ }
+ if (copy_header) {
+ if (aws_http_headers_add(copied_headers, lower_name_cursor, header_iter.value)) {
+ goto error;
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_GENERAL,
+ "Added header to new HTTP/2 header - \"%.*s\": \"%.*s\" ",
+ (int)lower_name_cursor.len,
+ lower_name_cursor.ptr,
+ (int)header_iter.value.len,
+ header_iter.value.ptr);
+ }
+ }
+ aws_byte_buf_clean_up(&lower_name_buf);
+ aws_http_message_set_body_stream(message, aws_http_message_get_body_stream(http1_msg));
+
+ return message;
+error:
+ aws_http_message_release(message);
+ aws_byte_buf_clean_up(&lower_name_buf);
+ return NULL;
+}
+
/* Begins execution of a client stream that was created in a paused state.
 * Dispatches to the protocol-specific (HTTP/1 or HTTP/2) activate implementation.
 * Returns AWS_OP_SUCCESS or raises an error from the underlying implementation. */
int aws_http_stream_activate(struct aws_http_stream *stream) {
    AWS_PRECONDITION(stream);
    AWS_PRECONDITION(stream->vtable);
    AWS_PRECONDITION(stream->vtable->activate);
    /* make sure it's actually a client calling us. This is always a programmer bug, so just assert and die. */
    AWS_PRECONDITION(aws_http_connection_is_client(stream->owning_connection));

    return stream->vtable->activate(stream);
}
+
+struct aws_http_stream *aws_http_stream_new_server_request_handler(
+ const struct aws_http_request_handler_options *options) {
+ AWS_PRECONDITION(options);
+ if (options->self_size == 0 || !options->server_connection ||
+ !aws_http_connection_is_server(options->server_connection)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Cannot create server request handler stream, options are invalid.",
+ (void *)options->server_connection);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ return options->server_connection->vtable->new_server_request_handler_stream(options);
+}
+
/* Sends `response` on a server-side stream.
 * The response message must satisfy aws_http_message_is_response().
 * Dispatches to the owning connection's protocol implementation. */
int aws_http_stream_send_response(struct aws_http_stream *stream, struct aws_http_message *response) {
    AWS_PRECONDITION(stream);
    AWS_PRECONDITION(response);
    AWS_PRECONDITION(aws_http_message_is_response(response));
    return stream->owning_connection->vtable->stream_send_response(stream, response);
}
+
/* Releases one reference on the stream; NULL is a no-op.
 * When the last reference is released the stream is destroyed, the user's
 * on_destroy callback (if any) is invoked, and the connection reference the
 * stream held is released. */
void aws_http_stream_release(struct aws_http_stream *stream) {
    if (!stream) {
        return;
    }

    size_t prev_refcount = aws_atomic_fetch_sub(&stream->refcount, 1);
    if (prev_refcount == 1) {
        AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Final stream refcount released.", (void *)stream);

        /* Capture everything needed after destruction BEFORE destroy() frees the stream. */
        void *user_data = stream->user_data;
        aws_http_on_stream_destroy_fn *on_destroy_callback = stream->on_destroy;

        struct aws_http_connection *owning_connection = stream->owning_connection;
        stream->vtable->destroy(stream);

        if (on_destroy_callback) {
            /* inform user that destroy completed. */
            on_destroy_callback(user_data);
        }
        /* Connection needed to outlive stream, but it's free to go now */
        aws_http_connection_release(owning_connection);
    } else {
        AWS_ASSERT(prev_refcount != 0);
        AWS_LOGF_TRACE(
            AWS_LS_HTTP_STREAM, "id=%p: Stream refcount released, %zu remaining.", (void *)stream, prev_refcount - 1);
    }
}
+
/* Returns the connection that owns this stream (borrowed reference; no refcount change). */
struct aws_http_connection *aws_http_stream_get_connection(const struct aws_http_stream *stream) {
    AWS_ASSERT(stream);
    return stream->owning_connection;
}
+
+int aws_http_stream_get_incoming_response_status(const struct aws_http_stream *stream, int *out_status) {
+ AWS_ASSERT(stream && stream->client_data);
+
+ if (stream->client_data->response_status == (int)AWS_HTTP_STATUS_CODE_UNKNOWN) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Status code not yet received.", (void *)stream);
+ return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE);
+ }
+
+ *out_status = stream->client_data->response_status;
+ return AWS_OP_SUCCESS;
+}
+
+int aws_http_stream_get_incoming_request_method(
+ const struct aws_http_stream *stream,
+ struct aws_byte_cursor *out_method) {
+ AWS_ASSERT(stream && stream->server_data);
+
+ if (!stream->server_data->request_method_str.ptr) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Request method not yet received.", (void *)stream);
+ return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE);
+ }
+
+ *out_method = stream->server_data->request_method_str;
+ return AWS_OP_SUCCESS;
+}
+
+int aws_http_stream_get_incoming_request_uri(const struct aws_http_stream *stream, struct aws_byte_cursor *out_uri) {
+ AWS_ASSERT(stream && stream->server_data);
+
+ if (!stream->server_data->request_path.ptr) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Request URI not yet received.", (void *)stream);
+ return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE);
+ }
+
+ *out_uri = stream->server_data->request_path;
+ return AWS_OP_SUCCESS;
+}
+
/* Increments the stream's flow-control read window by `increment_size`.
 * Dispatches to the protocol-specific implementation. */
void aws_http_stream_update_window(struct aws_http_stream *stream, size_t increment_size) {
    stream->vtable->update_window(stream, increment_size);
}
+
/* Returns the stream's protocol-level id (HTTP/2 stream id; semantics for
 * HTTP/1 depend on the implementation — not visible from this file). */
uint32_t aws_http_stream_get_id(const struct aws_http_stream *stream) {
    return stream->id;
}
+
+int aws_http2_stream_reset(struct aws_http_stream *http2_stream, uint32_t http2_error) {
+ AWS_PRECONDITION(http2_stream);
+ AWS_PRECONDITION(http2_stream->vtable);
+ if (!http2_stream->vtable->http2_reset_stream) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: HTTP/2 stream only function invoked on other stream, ignoring call.",
+ (void *)http2_stream);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ return http2_stream->vtable->http2_reset_stream(http2_stream, http2_error);
+}
+
+int aws_http2_stream_get_received_reset_error_code(struct aws_http_stream *http2_stream, uint32_t *out_http2_error) {
+ AWS_PRECONDITION(http2_stream);
+ AWS_PRECONDITION(http2_stream->vtable);
+ AWS_PRECONDITION(out_http2_error);
+ if (!http2_stream->vtable->http2_get_received_error_code) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: HTTP/2 stream only function invoked on other stream, ignoring call.",
+ (void *)http2_stream);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ return http2_stream->vtable->http2_get_received_error_code(http2_stream, out_http2_error);
+}
+
+int aws_http2_stream_get_sent_reset_error_code(struct aws_http_stream *http2_stream, uint32_t *out_http2_error) {
+ AWS_PRECONDITION(http2_stream);
+ AWS_PRECONDITION(http2_stream->vtable);
+ AWS_PRECONDITION(out_http2_error);
+ if (!http2_stream->vtable->http2_get_sent_error_code) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: HTTP/2 stream only function invoked on other stream, ignoring call.",
+ (void *)http2_stream);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ return http2_stream->vtable->http2_get_sent_error_code(http2_stream, out_http2_error);
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/statistics.c b/contrib/restricted/aws/aws-c-http/source/statistics.c
new file mode 100644
index 00000000000..ea4e65c1dd0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/statistics.c
@@ -0,0 +1,35 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/statistics.h>
+
/* Zeroes the stats block and tags it with the HTTP/1 channel category.
 * Always succeeds (returns AWS_OP_SUCCESS). */
int aws_crt_statistics_http1_channel_init(struct aws_crt_statistics_http1_channel *stats) {
    AWS_ZERO_STRUCT(*stats);
    stats->category = AWSCRT_STAT_CAT_HTTP1_CHANNEL;

    return AWS_OP_SUCCESS;
}
+
/* No dynamically-allocated members, so clean-up is intentionally a no-op. */
void aws_crt_statistics_http1_channel_cleanup(struct aws_crt_statistics_http1_channel *stats) {
    (void)stats;
}
+
+void aws_crt_statistics_http1_channel_reset(struct aws_crt_statistics_http1_channel *stats) {
+ stats->pending_outgoing_stream_ms = 0;
+ stats->pending_incoming_stream_ms = 0;
+ stats->current_outgoing_stream_id = 0;
+ stats->current_incoming_stream_id = 0;
+}
+
/* Zeroes the stats block and tags it with the HTTP/2 channel category.
 * NOTE(review): unlike the HTTP/1 variant this returns void — presumably a
 * deliberate API difference upstream; preserved here. */
void aws_crt_statistics_http2_channel_init(struct aws_crt_statistics_http2_channel *stats) {
    AWS_ZERO_STRUCT(*stats);
    stats->category = AWSCRT_STAT_CAT_HTTP2_CHANNEL;
}
+
+void aws_crt_statistics_http2_channel_reset(struct aws_crt_statistics_http2_channel *stats) {
+ stats->pending_outgoing_stream_ms = 0;
+ stats->pending_incoming_stream_ms = 0;
+ stats->was_inactive = false;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/strutil.c b/contrib/restricted/aws/aws-c-http/source/strutil.c
new file mode 100644
index 00000000000..552535f46d8
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/strutil.c
@@ -0,0 +1,232 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/strutil.h>
+
+static struct aws_byte_cursor s_trim(struct aws_byte_cursor cursor, const bool trim_table[256]) {
+ /* trim leading whitespace */
+ size_t i;
+ for (i = 0; i < cursor.len; ++i) {
+ const uint8_t c = cursor.ptr[i];
+ if (!trim_table[c]) {
+ break;
+ }
+ }
+ cursor.ptr += i;
+ cursor.len -= i;
+
+ /* trim trailing whitespace */
+ for (; cursor.len; --cursor.len) {
+ const uint8_t c = cursor.ptr[cursor.len - 1];
+ if (!trim_table[c]) {
+ break;
+ }
+ }
+
+ return cursor;
+}
+
/* Octets treated as optional whitespace (OWS) in HTTP: space and horizontal tab. */
static const bool s_http_whitespace_table[256] = {
    [' '] = true,
    ['\t'] = true,
};

/* Returns `cursor` with leading and trailing HTTP whitespace (SP / HTAB) removed. */
struct aws_byte_cursor aws_strutil_trim_http_whitespace(struct aws_byte_cursor cursor) {
    return s_trim(cursor, s_http_whitespace_table);
}
+
/* RFC7230 section 3.2.6:
 * token = 1*tchar
 * tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
 * / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
 * / DIGIT / ALPHA
 *
 * True for every octet that is a legal tchar; all other entries default to false.
 */
static const bool s_http_token_table[256] = {
    ['!'] = true, ['#'] = true, ['$'] = true, ['%'] = true, ['&'] = true, ['\''] = true, ['*'] = true, ['+'] = true,
    ['-'] = true, ['.'] = true, ['^'] = true, ['_'] = true, ['`'] = true, ['|'] = true, ['~'] = true,

    ['0'] = true, ['1'] = true, ['2'] = true, ['3'] = true, ['4'] = true, ['5'] = true, ['6'] = true, ['7'] = true,
    ['8'] = true, ['9'] = true,

    ['A'] = true, ['B'] = true, ['C'] = true, ['D'] = true, ['E'] = true, ['F'] = true, ['G'] = true, ['H'] = true,
    ['I'] = true, ['J'] = true, ['K'] = true, ['L'] = true, ['M'] = true, ['N'] = true, ['O'] = true, ['P'] = true,
    ['Q'] = true, ['R'] = true, ['S'] = true, ['T'] = true, ['U'] = true, ['V'] = true, ['W'] = true, ['X'] = true,
    ['Y'] = true, ['Z'] = true,

    ['a'] = true, ['b'] = true, ['c'] = true, ['d'] = true, ['e'] = true, ['f'] = true, ['g'] = true, ['h'] = true,
    ['i'] = true, ['j'] = true, ['k'] = true, ['l'] = true, ['m'] = true, ['n'] = true, ['o'] = true, ['p'] = true,
    ['q'] = true, ['r'] = true, ['s'] = true, ['t'] = true, ['u'] = true, ['v'] = true, ['w'] = true, ['x'] = true,
    ['y'] = true, ['z'] = true,
};
+
/* Same as s_http_token_table, but with uppercase ALPHA removed — used to
 * validate names that must already be lowercase (e.g. HTTP/2 header names). */
static const bool s_http_lowercase_token_table[256] = {
    ['!'] = true, ['#'] = true, ['$'] = true, ['%'] = true, ['&'] = true, ['\''] = true, ['*'] = true, ['+'] = true,
    ['-'] = true, ['.'] = true, ['^'] = true, ['_'] = true, ['`'] = true, ['|'] = true, ['~'] = true,

    ['0'] = true, ['1'] = true, ['2'] = true, ['3'] = true, ['4'] = true, ['5'] = true, ['6'] = true, ['7'] = true,
    ['8'] = true, ['9'] = true,

    ['a'] = true, ['b'] = true, ['c'] = true, ['d'] = true, ['e'] = true, ['f'] = true, ['g'] = true, ['h'] = true,
    ['i'] = true, ['j'] = true, ['k'] = true, ['l'] = true, ['m'] = true, ['n'] = true, ['o'] = true, ['p'] = true,
    ['q'] = true, ['r'] = true, ['s'] = true, ['t'] = true, ['u'] = true, ['v'] = true, ['w'] = true, ['x'] = true,
    ['y'] = true, ['z'] = true,
};
+
+static bool s_is_token(struct aws_byte_cursor token, const bool token_table[256]) {
+ if (token.len == 0) {
+ return false;
+ }
+
+ for (size_t i = 0; i < token.len; ++i) {
+ const uint8_t c = token.ptr[i];
+ if (token_table[c] == false) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
/* True if `token` is a valid RFC7230 token (1*tchar). */
bool aws_strutil_is_http_token(struct aws_byte_cursor token) {
    return s_is_token(token, s_http_token_table);
}

/* Same as aws_strutil_is_http_token(), but additionally rejects uppercase ALPHA. */
bool aws_strutil_is_lowercase_http_token(struct aws_byte_cursor token) {
    return s_is_token(token, s_http_lowercase_token_table);
}
+
/**
 * Table with true for all octets allowed in field-content,
 * as defined in RFC7230 section 3.2 and 3.2.6 and RFC5234 appendix-B.1:
 *
 * field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
 * field-vchar = VCHAR / obs-text
 * VCHAR = %x21-7E ; visible (printing) characters
 * obs-text = %x80-FF
 *
 * NOTE(review): the original had a second, unmatched `clang-format off` before
 * this comment, which would have left formatting disabled for the rest of the
 * file; removed so the off/on pair below balances.
 */
static const bool s_http_field_content_table[256] = {
    /* clang-format off */

    /* whitespace */
    ['\t'] = true, [' '] = true,

    /* VCHAR = 0x21-7E */
    [0x21] = true, [0x22] = true, [0x23] = true, [0x24] = true, [0x25] = true, [0x26] = true, [0x27] = true,
    [0x28] = true, [0x29] = true, [0x2A] = true, [0x2B] = true, [0x2C] = true, [0x2D] = true, [0x2E] = true,
    [0x2F] = true, [0x30] = true, [0x31] = true, [0x32] = true, [0x33] = true, [0x34] = true, [0x35] = true,
    [0x36] = true, [0x37] = true, [0x38] = true, [0x39] = true, [0x3A] = true, [0x3B] = true, [0x3C] = true,
    [0x3D] = true, [0x3E] = true, [0x3F] = true, [0x40] = true, [0x41] = true, [0x42] = true, [0x43] = true,
    [0x44] = true, [0x45] = true, [0x46] = true, [0x47] = true, [0x48] = true, [0x49] = true, [0x4A] = true,
    [0x4B] = true, [0x4C] = true, [0x4D] = true, [0x4E] = true, [0x4F] = true, [0x50] = true, [0x51] = true,
    [0x52] = true, [0x53] = true, [0x54] = true, [0x55] = true, [0x56] = true, [0x57] = true, [0x58] = true,
    [0x59] = true, [0x5A] = true, [0x5B] = true, [0x5C] = true, [0x5D] = true, [0x5E] = true, [0x5F] = true,
    [0x60] = true, [0x61] = true, [0x62] = true, [0x63] = true, [0x64] = true, [0x65] = true, [0x66] = true,
    [0x67] = true, [0x68] = true, [0x69] = true, [0x6A] = true, [0x6B] = true, [0x6C] = true, [0x6D] = true,
    [0x6E] = true, [0x6F] = true, [0x70] = true, [0x71] = true, [0x72] = true, [0x73] = true, [0x74] = true,
    [0x75] = true, [0x76] = true, [0x77] = true, [0x78] = true, [0x79] = true, [0x7A] = true, [0x7B] = true,
    [0x7C] = true, [0x7D] = true, [0x7E] = true,

    /* obs-text = %x80-FF */
    [0x80] = true, [0x81] = true, [0x82] = true, [0x83] = true, [0x84] = true, [0x85] = true, [0x86] = true,
    [0x87] = true, [0x88] = true, [0x89] = true, [0x8A] = true, [0x8B] = true, [0x8C] = true, [0x8D] = true,
    [0x8E] = true, [0x8F] = true, [0x90] = true, [0x91] = true, [0x92] = true, [0x93] = true, [0x94] = true,
    [0x95] = true, [0x96] = true, [0x97] = true, [0x98] = true, [0x99] = true, [0x9A] = true, [0x9B] = true,
    [0x9C] = true, [0x9D] = true, [0x9E] = true, [0x9F] = true, [0xA0] = true, [0xA1] = true, [0xA2] = true,
    [0xA3] = true, [0xA4] = true, [0xA5] = true, [0xA6] = true, [0xA7] = true, [0xA8] = true, [0xA9] = true,
    [0xAA] = true, [0xAB] = true, [0xAC] = true, [0xAD] = true, [0xAE] = true, [0xAF] = true, [0xB0] = true,
    [0xB1] = true, [0xB2] = true, [0xB3] = true, [0xB4] = true, [0xB5] = true, [0xB6] = true, [0xB7] = true,
    [0xB8] = true, [0xB9] = true, [0xBA] = true, [0xBB] = true, [0xBC] = true, [0xBD] = true, [0xBE] = true,
    [0xBF] = true, [0xC0] = true, [0xC1] = true, [0xC2] = true, [0xC3] = true, [0xC4] = true, [0xC5] = true,
    [0xC6] = true, [0xC7] = true, [0xC8] = true, [0xC9] = true, [0xCA] = true, [0xCB] = true, [0xCC] = true,
    [0xCD] = true, [0xCE] = true, [0xCF] = true, [0xD0] = true, [0xD1] = true, [0xD2] = true, [0xD3] = true,
    [0xD4] = true, [0xD5] = true, [0xD6] = true, [0xD7] = true, [0xD8] = true, [0xD9] = true, [0xDA] = true,
    [0xDB] = true, [0xDC] = true, [0xDD] = true, [0xDE] = true, [0xDF] = true, [0xE0] = true, [0xE1] = true,
    [0xE2] = true, [0xE3] = true, [0xE4] = true, [0xE5] = true, [0xE6] = true, [0xE7] = true, [0xE8] = true,
    [0xE9] = true, [0xEA] = true, [0xEB] = true, [0xEC] = true, [0xED] = true, [0xEE] = true, [0xEF] = true,
    [0xF0] = true, [0xF1] = true, [0xF2] = true, [0xF3] = true, [0xF4] = true, [0xF5] = true, [0xF6] = true,
    [0xF7] = true, [0xF8] = true, [0xF9] = true, [0xFA] = true, [0xFB] = true, [0xFC] = true, [0xFD] = true,
    [0xFE] = true, [0xFF] = true,
    /* clang-format on */
};
+
+/**
+ * From RFC7230 section 3.2:
+ * field-value = *( field-content / obs-fold )
+ * field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+ *
+ * But we're forbidding obs-fold
+ */
+bool aws_strutil_is_http_field_value(struct aws_byte_cursor cursor) {
+ if (cursor.len == 0) {
+ return true;
+ }
+
+ /* first and last char cannot be whitespace */
+ const uint8_t first_c = cursor.ptr[0];
+ const uint8_t last_c = cursor.ptr[cursor.len - 1];
+ if (s_http_whitespace_table[first_c] || s_http_whitespace_table[last_c]) {
+ return false;
+ }
+
+ /* ensure every char is legal field-content */
+ size_t i = 0;
+ do {
+ const uint8_t c = cursor.ptr[i++];
+ if (s_http_field_content_table[c] == false) {
+ return false;
+ }
+ } while (i < cursor.len);
+
+ return true;
+}
+
+/**
+ * From RFC7230 section 3.1.2:
+ * reason-phrase = *( HTAB / SP / VCHAR / obs-text )
+ * VCHAR = %x21-7E ; visible (printing) characters
+ * obs-text = %x80-FF
+ */
+bool aws_strutil_is_http_reason_phrase(struct aws_byte_cursor cursor) {
+ for (size_t i = 0; i < cursor.len; ++i) {
+ const uint8_t c = cursor.ptr[i];
+ /* the field-content table happens to allow the exact same characters as reason-phrase */
+ if (s_http_field_content_table[c] == false) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool aws_strutil_is_http_request_target(struct aws_byte_cursor cursor) {
+ if (cursor.len == 0) {
+ return false;
+ }
+
+ /* TODO: Actually check the complete grammar as defined in RFC7230 5.3 and
+ * RFC3986. Currently this just checks whether the sequence is blatantly illegal */
+ size_t i = 0;
+ do {
+ const uint8_t c = cursor.ptr[i++];
+ /* everything <= ' ' is non-visible ascii*/
+ if (c <= ' ') {
+ return false;
+ }
+ } while (i < cursor.len);
+
+ return true;
+}
+
+bool aws_strutil_is_http_pseudo_header_name(struct aws_byte_cursor cursor) {
+ if (cursor.len == 0) {
+ return false;
+ }
+ const uint8_t c = cursor.ptr[0];
+ if (c != ':') {
+ /* short cut */
+ return false;
+ }
+ return true;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/websocket.c b/contrib/restricted/aws/aws-c-http/source/websocket.c
new file mode 100644
index 00000000000..8b57953624d
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/websocket.c
@@ -0,0 +1,1790 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/websocket_impl.h>
+
+#include <aws/common/atomics.h>
+#include <aws/common/device_random.h>
+#include <aws/common/encoding.h>
+#include <aws/common/mutex.h>
+#include <aws/common/ref_count.h>
+#include <aws/http/private/websocket_decoder.h>
+#include <aws/http/private/websocket_encoder.h>
+#include <aws/http/request_response.h>
+#include <aws/io/channel.h>
+#include <aws/io/logging.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+/* TODO: If something goes wrong during normal shutdown, do I change the error_code? */
+
/* An outbound frame queued for encoding. `node` links the frame into the
 * websocket's outgoing / write-completion lists. */
struct outgoing_frame {
    struct aws_websocket_send_frame_options def;
    struct aws_linked_list_node node;
};
+
/* Channel-handler implementation of an RFC-6455 websocket. State is split into
 * `thread_data` (channel-thread only) and `synced_data` (any thread, lock held). */
struct aws_websocket {
    struct aws_allocator *alloc;
    struct aws_ref_count ref_count;
    struct aws_channel_handler channel_handler;
    struct aws_channel_slot *channel_slot;
    size_t initial_window_size;
    bool manual_window_update;

    /* User callbacks for incoming frames; invoked with `user_data`. */
    void *user_data;
    aws_websocket_on_incoming_frame_begin_fn *on_incoming_frame_begin;
    aws_websocket_on_incoming_frame_payload_fn *on_incoming_frame_payload;
    aws_websocket_on_incoming_frame_complete_fn *on_incoming_frame_complete;

    struct aws_channel_task move_synced_data_to_thread_task;
    struct aws_channel_task shutdown_channel_task;
    struct aws_channel_task increment_read_window_task;
    struct aws_channel_task waiting_on_payload_stream_task;
    struct aws_channel_task close_timeout_task;
    bool is_server;

    /* Data that should only be accessed from the websocket's channel thread. */
    struct {
        struct aws_websocket_encoder encoder;

        /* list of outbound frames that have yet to be encoded and sent to the socket */
        struct aws_linked_list outgoing_frame_list;

        /* current outbound frame being encoded and sent to the socket */
        struct outgoing_frame *current_outgoing_frame;

        /*
         * list of outbound frames that have been completely written to the io message heading to the socket.
         * When the socket write completes we can in turn invoke completion callbacks for all of these frames
         */
        struct aws_linked_list write_completion_frames;

        struct aws_websocket_decoder decoder;
        struct aws_websocket_incoming_frame *current_incoming_frame;
        struct aws_websocket_incoming_frame incoming_frame_storage;

        /* Payload of incoming PING frame.
         * The PONG frame we send in response must have an identical payload */
        struct aws_byte_buf incoming_ping_payload;

        /* If current incoming frame is CONTINUATION, this is the data type it is a continuation of. */
        enum aws_websocket_opcode continuation_of_opcode;

        /* Amount to increment window after a channel message has been processed. */
        size_t incoming_message_window_update;

        /* Cached slot to the right (downstream neighbor in the channel). */
        struct aws_channel_slot *last_known_right_slot;

        /* True when no more frames will be read, due to:
         * - a CLOSE frame was received
         * - decoder error
         * - channel shutdown in read-dir */
        bool is_reading_stopped;

        /* True when no more frames will be written, due to:
         * - a CLOSE frame was sent
         * - encoder error
         * - channel shutdown in write-dir */
        bool is_writing_stopped;

        /* During normal shutdown websocket ensures that a CLOSE frame is sent */
        bool is_shutting_down_and_waiting_for_close_frame_to_be_written;
        int channel_shutdown_error_code;
        bool channel_shutdown_free_scarce_resources_immediately;

        /* Wait until each aws_io_message is completely written to
         * the socket before sending the next aws_io_message */
        bool is_waiting_for_write_completion;

        /* If, while writing out data from a payload stream, we experience "read would block",
         * schedule a task to try again in the near-future. */
        bool is_waiting_on_payload_stream_task;

        /* True if this websocket is being used as a dumb mid-channel handler.
         * The websocket will no longer respond to its public API or invoke callbacks. */
        bool is_midchannel_handler;
    } thread_data;

    /* Data that may be touched from any thread (lock must be held). */
    struct {
        struct aws_mutex lock;

        struct aws_linked_list outgoing_frame_list;

        /* If non-zero, then increment_read_window_task is scheduled */
        size_t window_increment_size;

        /* Error-code returned by aws_websocket_send_frame() when is_writing_stopped is true */
        int send_frame_error_code;

        /* Use a task to issue a channel shutdown. */
        int shutdown_channel_task_error_code;
        bool is_shutdown_channel_task_scheduled;

        bool is_move_synced_data_to_thread_task_scheduled;

        /* Mirrors variable from thread_data */
        bool is_midchannel_handler;
    } synced_data;
};
+
+static int s_handler_process_read_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message);
+
+static int s_handler_process_write_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message);
+
+static int s_handler_increment_read_window(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ size_t size);
+
+static int s_handler_shutdown(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ enum aws_channel_direction dir,
+ int error_code,
+ bool free_scarce_resources_immediately);
+
+static size_t s_handler_initial_window_size(struct aws_channel_handler *handler);
+static size_t s_handler_message_overhead(struct aws_channel_handler *handler);
+static void s_handler_destroy(struct aws_channel_handler *handler);
+static void s_websocket_on_refcount_zero(void *user_data);
+
+static int s_encoder_stream_outgoing_payload(struct aws_byte_buf *out_buf, void *user_data);
+
+static int s_decoder_on_frame(const struct aws_websocket_frame *frame, void *user_data);
+static int s_decoder_on_payload(struct aws_byte_cursor data, void *user_data);
+static int s_decoder_on_user_payload(struct aws_websocket *websocket, struct aws_byte_cursor data);
+static int s_decoder_on_midchannel_payload(struct aws_websocket *websocket, struct aws_byte_cursor data);
+
+static void s_destroy_outgoing_frame(struct aws_websocket *websocket, struct outgoing_frame *frame, int error_code);
+static void s_complete_frame_list(struct aws_websocket *websocket, struct aws_linked_list *frames, int error_code);
+static void s_complete_incoming_frame(struct aws_websocket *websocket, int error_code, bool *out_callback_result);
+static void s_finish_shutdown(struct aws_websocket *websocket);
+static void s_io_message_write_completed(
+ struct aws_channel *channel,
+ struct aws_io_message *message,
+ int err_code,
+ void *user_data);
+static int s_send_frame(
+ struct aws_websocket *websocket,
+ const struct aws_websocket_send_frame_options *options,
+ bool from_public_api);
+static bool s_midchannel_send_payload(struct aws_websocket *websocket, struct aws_byte_buf *out_buf, void *user_data);
+static void s_midchannel_send_complete(struct aws_websocket *websocket, int error_code, void *user_data);
+static void s_move_synced_data_to_thread_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static void s_increment_read_window_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static void s_shutdown_channel_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static void s_waiting_on_payload_stream_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static void s_close_timeout_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static void s_schedule_channel_shutdown(struct aws_websocket *websocket, int error_code);
+static void s_shutdown_due_to_write_err(struct aws_websocket *websocket, int error_code);
+static void s_shutdown_due_to_read_err(struct aws_websocket *websocket, int error_code);
+static void s_stop_writing(struct aws_websocket *websocket, int send_frame_error_code);
+static void s_try_write_outgoing_frames(struct aws_websocket *websocket);
+
/* Channel-handler vtable: routes channel events (reads, writes, window
 * updates, shutdown) into this websocket implementation. */
static struct aws_channel_handler_vtable s_channel_handler_vtable = {
    .process_read_message = s_handler_process_read_message,
    .process_write_message = s_handler_process_write_message,
    .increment_read_window = s_handler_increment_read_window,
    .shutdown = s_handler_shutdown,
    .initial_window_size = s_handler_initial_window_size,
    .message_overhead = s_handler_message_overhead,
    .destroy = s_handler_destroy,
};
+
+const char *aws_websocket_opcode_str(uint8_t opcode) {
+ switch (opcode) {
+ case AWS_WEBSOCKET_OPCODE_CONTINUATION:
+ return "continuation";
+ case AWS_WEBSOCKET_OPCODE_TEXT:
+ return "text";
+ case AWS_WEBSOCKET_OPCODE_BINARY:
+ return "binary";
+ case AWS_WEBSOCKET_OPCODE_CLOSE:
+ return "close";
+ case AWS_WEBSOCKET_OPCODE_PING:
+ return "ping";
+ case AWS_WEBSOCKET_OPCODE_PONG:
+ return "pong";
+ default:
+ return "";
+ }
+}
+
/* True for data frames (continuation/text/binary and reserved data opcodes).
 * RFC-6455 Section 5.6: control frames have the most significant bit of the
 * 4-bit opcode set, data frames have it clear. */
bool aws_websocket_is_data_frame(uint8_t opcode) {
    return (opcode & 0x08) == 0;
}
+
+static void s_lock_synced_data(struct aws_websocket *websocket) {
+ int err = aws_mutex_lock(&websocket->synced_data.lock);
+ AWS_ASSERT(!err);
+ (void)err;
+}
+
+static void s_unlock_synced_data(struct aws_websocket *websocket) {
+ int err = aws_mutex_unlock(&websocket->synced_data.lock);
+ AWS_ASSERT(!err);
+ (void)err;
+}
+
/* Creates a websocket channel handler and installs it at the end of the
 * channel from `options`. On success the channel is held alive until
 * aws_websocket_release() is called. Returns NULL on failure (slot and
 * partially-constructed websocket are cleaned up). */
struct aws_websocket *aws_websocket_handler_new(const struct aws_websocket_handler_options *options) {
    struct aws_channel_slot *slot = NULL;
    struct aws_websocket *websocket = NULL;
    int err;

    slot = aws_channel_slot_new(options->channel);
    if (!slot) {
        goto error;
    }

    err = aws_channel_slot_insert_end(options->channel, slot);
    if (err) {
        goto error;
    }

    websocket = aws_mem_calloc(options->allocator, 1, sizeof(struct aws_websocket));
    if (!websocket) {
        goto error;
    }

    websocket->alloc = options->allocator;
    aws_ref_count_init(&websocket->ref_count, websocket, s_websocket_on_refcount_zero);
    websocket->channel_handler.vtable = &s_channel_handler_vtable;
    websocket->channel_handler.alloc = options->allocator;
    websocket->channel_handler.impl = websocket;

    websocket->channel_slot = slot;

    websocket->initial_window_size = options->initial_window_size;
    websocket->manual_window_update = options->manual_window_update;

    websocket->user_data = options->user_data;
    websocket->on_incoming_frame_begin = options->on_incoming_frame_begin;
    websocket->on_incoming_frame_payload = options->on_incoming_frame_payload;
    websocket->on_incoming_frame_complete = options->on_incoming_frame_complete;

    websocket->is_server = options->is_server;

    aws_channel_task_init(
        &websocket->move_synced_data_to_thread_task,
        s_move_synced_data_to_thread_task,
        websocket,
        "websocket_move_synced_data_to_thread");
    aws_channel_task_init(
        &websocket->shutdown_channel_task, s_shutdown_channel_task, websocket, "websocket_shutdown_channel");
    aws_channel_task_init(
        &websocket->increment_read_window_task,
        s_increment_read_window_task,
        websocket,
        "websocket_increment_read_window");
    aws_channel_task_init(
        &websocket->waiting_on_payload_stream_task,
        s_waiting_on_payload_stream_task,
        websocket,
        "websocket_waiting_on_payload_stream");
    aws_channel_task_init(&websocket->close_timeout_task, s_close_timeout_task, websocket, "websocket_close_timeout");

    aws_linked_list_init(&websocket->thread_data.outgoing_frame_list);
    aws_linked_list_init(&websocket->thread_data.write_completion_frames);
    /* NOTE(review): return value unchecked; with capacity 0 this presumably cannot fail — confirm */
    aws_byte_buf_init(&websocket->thread_data.incoming_ping_payload, websocket->alloc, 0);

    aws_websocket_encoder_init(&websocket->thread_data.encoder, s_encoder_stream_outgoing_payload, websocket);

    aws_websocket_decoder_init(
        &websocket->thread_data.decoder, options->allocator, s_decoder_on_frame, s_decoder_on_payload, websocket);

    aws_linked_list_init(&websocket->synced_data.outgoing_frame_list);

    err = aws_mutex_init(&websocket->synced_data.lock);
    if (err) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_WEBSOCKET,
            "static: Failed to initialize mutex, error %d (%s).",
            aws_last_error(),
            aws_error_name(aws_last_error()));

        goto error;
    }

    err = aws_channel_slot_set_handler(slot, &websocket->channel_handler);
    if (err) {
        goto error;
    }

    /* Ensure websocket (and the rest of the channel) can't be destroyed until aws_websocket_release() is called */
    aws_channel_acquire_hold(options->channel);

    return websocket;

error:
    /* If the handler was never attached to the slot, destroy it directly;
     * once attached, removing the slot owns the handler's destruction. */
    if (slot) {
        if (websocket && !slot->handler) {
            websocket->channel_handler.vtable->destroy(&websocket->channel_handler);
        }
        aws_channel_slot_remove(slot);
    }
    return NULL;
}
+
/* Channel-handler vtable destroy: frees the websocket and everything it owns.
 * Called by the channel during teardown (or directly by the constructor's error
 * path); by then no frame may still be in flight in either direction. */
static void s_handler_destroy(struct aws_channel_handler *handler) {
    struct aws_websocket *websocket = handler->impl;
    AWS_ASSERT(!websocket->thread_data.current_outgoing_frame);
    AWS_ASSERT(!websocket->thread_data.current_incoming_frame);

    AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Destroying websocket.", (void *)websocket);

    /* Clean up in reverse order of construction, ending with the struct itself */
    aws_websocket_decoder_clean_up(&websocket->thread_data.decoder);
    aws_byte_buf_clean_up(&websocket->thread_data.incoming_ping_payload);
    aws_mutex_clean_up(&websocket->synced_data.lock);
    aws_mem_release(websocket->alloc, websocket);
}
+
/* Increment the websocket's ref-count. Returns the same websocket for
 * call-chaining convenience. websocket must not be NULL. */
struct aws_websocket *aws_websocket_acquire(struct aws_websocket *websocket) {
    AWS_PRECONDITION(websocket);
    AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Acquiring websocket ref-count.", (void *)websocket);
    aws_ref_count_acquire(&websocket->ref_count);
    return websocket;
}
+
+void aws_websocket_release(struct aws_websocket *websocket) {
+ if (!websocket) {
+ return;
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Releasing websocket ref-count.", (void *)websocket);
+ aws_ref_count_release(&websocket->ref_count);
+}
+
/* Ref-count zero callback: kick off channel shutdown (if not already underway)
 * and drop our hold on the channel. The websocket itself is freed later by
 * s_handler_destroy() when the channel destroys its slots. */
static void s_websocket_on_refcount_zero(void *user_data) {
    struct aws_websocket *websocket = user_data;
    AWS_ASSERT(websocket->channel_slot);

    AWS_LOGF_TRACE(
        AWS_LS_HTTP_WEBSOCKET, "id=%p: Websocket ref-count is zero, shut down if necessary.", (void *)websocket);

    /* Channel might already be shut down, but make sure */
    s_schedule_channel_shutdown(websocket, AWS_ERROR_SUCCESS);

    /* Channel won't destroy its slots/handlers until its refcount reaches 0 */
    aws_channel_release_hold(websocket->channel_slot->channel);
}
+
+struct aws_channel *aws_websocket_get_channel(const struct aws_websocket *websocket) {
+ return websocket->channel_slot->channel;
+}
+
/* Convert the websocket from "terminal" mode (user frame callbacks) to
 * "midchannel" mode, where it relays aws_io_messages for handlers placed
 * after it in the channel (payloads travel as BINARY frames).
 * Must be called on the channel's event-loop thread. One-way: cannot convert back.
 * Returns AWS_OP_SUCCESS or raises:
 *   AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY           - wrong thread
 *   AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER - already converted
 *   AWS_ERROR_HTTP_CONNECTION_CLOSED              - closed or closing
 *   AWS_ERROR_INVALID_STATE                       - mid-frame on the read side */
int aws_websocket_convert_to_midchannel_handler(struct aws_websocket *websocket) {
    if (!aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_WEBSOCKET, "id=%p: Cannot convert to midchannel handler on this thread.", (void *)websocket);
        return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY);
    }

    if (websocket->thread_data.is_midchannel_handler) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_WEBSOCKET, "id=%p: Websocket has already converted to midchannel handler.", (void *)websocket);
        return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER);
    }

    if (websocket->thread_data.is_reading_stopped || websocket->thread_data.is_writing_stopped) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_WEBSOCKET,
            "id=%p: Cannot convert websocket to midchannel handler because it is closed or closing.",
            (void *)websocket);
        return aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED);
    }

    if (websocket->thread_data.current_incoming_frame) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_WEBSOCKET,
            "id=%p: Cannot convert to midchannel handler in the middle of an incoming frame.",
            (void *)websocket);
        return aws_raise_error(AWS_ERROR_INVALID_STATE);
    }

    /* NOTE: only the thread_data flag is set here; the synced_data flag checked by
     * s_send_frame() is presumably updated elsewhere (e.g. via a synced task) — confirm. */
    websocket->thread_data.is_midchannel_handler = true;

    return AWS_OP_SUCCESS;
}
+
/* Queue a frame for sending. Thread-safe: the frame is pushed onto the
 * synced_data list and a channel task moves it to thread_data later.
 * from_public_api distinguishes user calls (rejected once the websocket is a
 * midchannel handler) from internal calls (e.g. CLOSE frames, relay BINARY frames).
 * Returns AWS_OP_SUCCESS, or raises AWS_ERROR_INVALID_ARGUMENT /
 * AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER / whatever error stopped writing. */
static int s_send_frame(
    struct aws_websocket *websocket,
    const struct aws_websocket_send_frame_options *options,
    bool from_public_api) {

    AWS_ASSERT(websocket);
    AWS_ASSERT(options);

    /* Check for bad input. Log about non-obvious errors. */
    if (options->payload_length > 0 && !options->stream_outgoing_payload) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_WEBSOCKET,
            "id=%p: Invalid frame options, payload streaming function required when payload length is non-zero.",
            (void *)websocket);
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    /* Allocate before taking the lock; freed below if the send is rejected */
    struct outgoing_frame *frame = aws_mem_calloc(websocket->alloc, 1, sizeof(struct outgoing_frame));
    if (!frame) {
        return AWS_OP_ERR;
    }

    /* Copy the caller's options by value; the frame owns this copy */
    frame->def = *options;

    /* Enqueue frame, unless no further sending is allowed. */
    int send_error = 0;
    bool should_schedule_task = false;

    /* BEGIN CRITICAL SECTION */
    s_lock_synced_data(websocket);

    if (websocket->synced_data.is_midchannel_handler && from_public_api) {
        send_error = AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER;
    } else if (websocket->synced_data.send_frame_error_code) {
        /* Writing was stopped (CLOSE sent or connection dying); report why */
        send_error = websocket->synced_data.send_frame_error_code;
    } else {
        aws_linked_list_push_back(&websocket->synced_data.outgoing_frame_list, &frame->node);
        /* Only schedule the hand-off task once per batch of queued frames */
        if (!websocket->synced_data.is_move_synced_data_to_thread_task_scheduled) {
            websocket->synced_data.is_move_synced_data_to_thread_task_scheduled = true;
            should_schedule_task = true;
        }
    }

    s_unlock_synced_data(websocket);
    /* END CRITICAL SECTION */

    if (send_error) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_WEBSOCKET,
            "id=%p: Cannot send frame, error %d (%s).",
            (void *)websocket,
            send_error,
            aws_error_name(send_error));

        aws_mem_release(websocket->alloc, frame);
        return aws_raise_error(send_error);
    }

    AWS_LOGF_DEBUG(
        AWS_LS_HTTP_WEBSOCKET,
        "id=%p: Enqueuing outgoing frame with opcode=%" PRIu8 "(%s) length=%" PRIu64 " fin=%s",
        (void *)websocket,
        options->opcode,
        aws_websocket_opcode_str(options->opcode),
        options->payload_length,
        options->fin ? "T" : "F");

    if (should_schedule_task) {
        AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Scheduling synced data task.", (void *)websocket);
        aws_channel_schedule_task_now(websocket->channel_slot->channel, &websocket->move_synced_data_to_thread_task);
    }

    return AWS_OP_SUCCESS;
}
+
/* Public frame-send entry point. from_public_api=true so the call is rejected
 * once the websocket has been converted to a midchannel handler. */
int aws_websocket_send_frame(struct aws_websocket *websocket, const struct aws_websocket_send_frame_options *options) {
    return s_send_frame(websocket, options, true);
}
+
+static void s_move_synced_data_to_thread_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ struct aws_websocket *websocket = arg;
+ struct aws_linked_list tmp_list;
+ aws_linked_list_init(&tmp_list);
+
+ /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(websocket);
+
+ aws_linked_list_swap_contents(&websocket->synced_data.outgoing_frame_list, &tmp_list);
+
+ websocket->synced_data.is_move_synced_data_to_thread_task_scheduled = false;
+
+ s_unlock_synced_data(websocket);
+ /* END CRITICAL SECTION */
+
+ if (!aws_linked_list_empty(&tmp_list)) {
+ aws_linked_list_move_all_back(&websocket->thread_data.outgoing_frame_list, &tmp_list);
+ s_try_write_outgoing_frames(websocket);
+ }
+}
+
/* Encode as many queued outgoing frames as fit into one aws_io_message and send
 * it down the channel in the write direction. Event-loop thread only.
 * Stops early when: a frame's payload stream would block (retries via task),
 * the message fills up (resumes on write completion), or a CLOSE frame is
 * written (no further writes allowed after CLOSE). Any failure shuts the
 * connection down via s_shutdown_due_to_write_err(). */
static void s_try_write_outgoing_frames(struct aws_websocket *websocket) {
    AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
    int err;

    /* Check whether we should be writing data */
    if (!websocket->thread_data.current_outgoing_frame &&
        aws_linked_list_empty(&websocket->thread_data.outgoing_frame_list)) {

        AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: No data to write at this time.", (void *)websocket);
        return;
    }

    /* Only one aws_io_message in flight at a time; resume from s_io_message_write_completed() */
    if (websocket->thread_data.is_waiting_for_write_completion) {
        AWS_LOGF_TRACE(
            AWS_LS_HTTP_WEBSOCKET,
            "id=%p: Waiting until outstanding aws_io_message is written to socket before sending more data.",
            (void *)websocket);
        return;
    }

    if (websocket->thread_data.is_writing_stopped) {
        AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Websocket is no longer sending data.", (void *)websocket);
        return;
    }

    /* Acquire aws_io_message */
    struct aws_io_message *io_msg = aws_channel_slot_acquire_max_message_for_write(websocket->channel_slot);
    if (!io_msg) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_WEBSOCKET,
            "id=%p: Failed acquire message from pool, error %d (%s).",
            (void *)websocket,
            aws_last_error(),
            aws_error_name(aws_last_error()));
        goto error;
    }

    io_msg->user_data = websocket;
    io_msg->on_completion = s_io_message_write_completed;

    /* Loop through frames, writing their data into the io_msg */
    bool wrote_close_frame = false;
    while (!websocket->thread_data.is_writing_stopped) {
        if (websocket->thread_data.current_outgoing_frame) {
            /* A frame was partially encoded into a previous io_msg; keep going */
            AWS_LOGF_TRACE(
                AWS_LS_HTTP_WEBSOCKET,
                "id=%p: Resuming write of frame=%p opcode=%" PRIu8 "(%s) payload-length=%" PRIu64 ".",
                (void *)websocket,
                (void *)websocket->thread_data.current_outgoing_frame,
                websocket->thread_data.current_outgoing_frame->def.opcode,
                aws_websocket_opcode_str(websocket->thread_data.current_outgoing_frame->def.opcode),
                websocket->thread_data.current_outgoing_frame->def.payload_length);

        } else {
            /* We're not in the middle of encoding a frame, so pop off the next one to encode. */
            if (aws_linked_list_empty(&websocket->thread_data.outgoing_frame_list)) {
                AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: No more frames to write.", (void *)websocket);
                break;
            }

            struct aws_linked_list_node *node = aws_linked_list_pop_front(&websocket->thread_data.outgoing_frame_list);
            websocket->thread_data.current_outgoing_frame = AWS_CONTAINER_OF(node, struct outgoing_frame, node);

            struct aws_websocket_frame frame = {
                .fin = websocket->thread_data.current_outgoing_frame->def.fin,
                .opcode = websocket->thread_data.current_outgoing_frame->def.opcode,
                .payload_length = websocket->thread_data.current_outgoing_frame->def.payload_length,
            };

            /* RFC-6455 Section 5.3 Client-to-Server Masking
             * Clients must mask payload with key derived from an unpredictable source of entropy. */
            if (!websocket->is_server) {
                frame.masked = true;
                /* TODO: faster source of random (but still seeded by device_random) */
                struct aws_byte_buf masking_key_buf = aws_byte_buf_from_empty_array(frame.masking_key, 4);
                err = aws_device_random_buffer(&masking_key_buf);
                if (err) {
                    AWS_LOGF_ERROR(
                        AWS_LS_HTTP_WEBSOCKET,
                        "id=%p: Failed to derive masking key, error %d (%s).",
                        (void *)websocket,
                        aws_last_error(),
                        aws_error_name(aws_last_error()));
                    goto error;
                }
            }

            err = aws_websocket_encoder_start_frame(&websocket->thread_data.encoder, &frame);
            if (err) {
                AWS_LOGF_ERROR(
                    AWS_LS_HTTP_WEBSOCKET,
                    "id=%p: Failed to start frame encoding, error %d (%s).",
                    (void *)websocket,
                    aws_last_error(),
                    aws_error_name(aws_last_error()));
                goto error;
            }

            AWS_LOGF_TRACE(
                AWS_LS_HTTP_WEBSOCKET,
                "id=%p: Start writing frame=%p opcode=%" PRIu8 "(%s) payload-length=%" PRIu64 ".",
                (void *)websocket,
                (void *)websocket->thread_data.current_outgoing_frame,
                websocket->thread_data.current_outgoing_frame->def.opcode,
                aws_websocket_opcode_str(websocket->thread_data.current_outgoing_frame->def.opcode),
                websocket->thread_data.current_outgoing_frame->def.payload_length);
        }

        /* Encode as much of the current frame as fits into the io_msg buffer */
        err = aws_websocket_encoder_process(&websocket->thread_data.encoder, &io_msg->message_data);
        if (err) {
            AWS_LOGF_ERROR(
                AWS_LS_HTTP_WEBSOCKET,
                "id=%p: Frame encoding failed with error %d (%s).",
                (void *)websocket,
                aws_last_error(),
                aws_error_name(aws_last_error()));
            goto error;
        }

        if (aws_websocket_encoder_is_frame_in_progress(&websocket->thread_data.encoder)) {
            /* io_msg is full; the frame resumes in the next io_msg */
            AWS_LOGF_TRACE(
                AWS_LS_HTTP_WEBSOCKET,
                "id=%p: Outgoing frame still in progress, but no more data can be written at this time.",
                (void *)websocket);
            break;
        }

        if (websocket->thread_data.current_outgoing_frame->def.opcode == AWS_WEBSOCKET_OPCODE_CLOSE) {
            wrote_close_frame = true;
        }

        /*
         * a completely-written frame gets added to the write completion list so that when the socket write completes
         * we can complete all of the outbound frames that were finished as part of the io message
         */
        aws_linked_list_push_back(
            &websocket->thread_data.write_completion_frames, &websocket->thread_data.current_outgoing_frame->node);

        websocket->thread_data.current_outgoing_frame = NULL;

        if (wrote_close_frame) {
            break;
        }
    }

    /* If payload stream didn't have any bytes available to read right now, then the aws_io_message might be empty.
     * If this is the case schedule a task to try again in the future. */
    if (io_msg->message_data.len == 0) {
        AWS_LOGF_TRACE(
            AWS_LS_HTTP_WEBSOCKET,
            "id=%p: Reading from payload stream would block, will try again later.",
            (void *)websocket);

        if (!websocket->thread_data.is_waiting_on_payload_stream_task) {
            websocket->thread_data.is_waiting_on_payload_stream_task = true;

            /* Future Optimization Idea: Minimize work while we wait. Use some kind of backoff for the retry timing,
             * or have some way for stream to notify when more data is available. */
            aws_channel_schedule_task_now(websocket->channel_slot->channel, &websocket->waiting_on_payload_stream_task);
        }

        aws_mem_release(io_msg->allocator, io_msg);
        return;
    }

    /* Prepare to send aws_io_message up the channel. */

    /* If CLOSE frame was written, that's the last data we'll write */
    if (wrote_close_frame) {
        s_stop_writing(websocket, AWS_ERROR_HTTP_WEBSOCKET_CLOSE_FRAME_SENT);
    }

    AWS_LOGF_TRACE(
        AWS_LS_HTTP_WEBSOCKET,
        "id=%p: Sending aws_io_message of size %zu in write direction.",
        (void *)websocket,
        io_msg->message_data.len);

    /* Set the flag before sending: the completion callback may fire synchronously */
    websocket->thread_data.is_waiting_for_write_completion = true;
    err = aws_channel_slot_send_message(websocket->channel_slot, io_msg, AWS_CHANNEL_DIR_WRITE);
    if (err) {
        websocket->thread_data.is_waiting_for_write_completion = false;
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_WEBSOCKET,
            "id=%p: Failed to send message in write direction, error %d (%s).",
            (void *)websocket,
            aws_last_error(),
            aws_error_name(aws_last_error()));
        goto error;
    }

    /* Finish shutdown if we were waiting for the CLOSE frame to be written */
    if (wrote_close_frame && websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written) {
        AWS_LOGF_TRACE(
            AWS_LS_HTTP_WEBSOCKET, "id=%p: CLOSE frame sent, finishing handler shutdown sequence.", (void *)websocket);

        s_finish_shutdown(websocket);
    }

    return;

error:
    /* NOTE: if aws_channel_slot_send_message() failed, io_msg ownership is
     * assumed to remain with us — releasing it here; confirm against aws-c-io docs. */
    if (io_msg) {
        aws_mem_release(io_msg->allocator, io_msg);
    }

    s_shutdown_due_to_write_err(websocket, aws_last_error());
}
+
+/* Encoder's outgoing_payload callback invokes current frame's callback */
+static int s_encoder_stream_outgoing_payload(struct aws_byte_buf *out_buf, void *user_data) {
+ struct aws_websocket *websocket = user_data;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+ AWS_ASSERT(websocket->thread_data.current_outgoing_frame);
+
+ struct outgoing_frame *current_frame = websocket->thread_data.current_outgoing_frame;
+ AWS_ASSERT(current_frame->def.stream_outgoing_payload);
+
+ bool callback_result = current_frame->def.stream_outgoing_payload(websocket, out_buf, current_frame->def.user_data);
+ if (!callback_result) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Outgoing payload callback has reported a failure.", (void *)websocket);
+ return aws_raise_error(AWS_ERROR_HTTP_CALLBACK_FAILURE);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_waiting_on_payload_stream_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ /* If channel has shut down, don't need to resume sending payload */
+ return;
+ }
+
+ struct aws_websocket *websocket = arg;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Done waiting for payload stream, sending more data...", (void *)websocket);
+
+ websocket->thread_data.is_waiting_on_payload_stream_task = false;
+ s_try_write_outgoing_frames(websocket);
+}
+
+static void s_io_message_write_completed(
+ struct aws_channel *channel,
+ struct aws_io_message *message,
+ int err_code,
+ void *user_data) {
+
+ (void)channel;
+ (void)message;
+ struct aws_websocket *websocket = user_data;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(channel));
+
+ /*
+ * Invoke the completion callbacks (and then destroy) for all the frames that were completely written as
+ * part of this message completion at the socket layer
+ */
+ s_complete_frame_list(websocket, &websocket->thread_data.write_completion_frames, err_code);
+
+ if (err_code == AWS_ERROR_SUCCESS) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: aws_io_message written to socket, sending more data...", (void *)websocket);
+
+ websocket->thread_data.is_waiting_for_write_completion = false;
+ s_try_write_outgoing_frames(websocket);
+ } else {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: aws_io_message did not finish writing to socket, error %d (%s).",
+ (void *)websocket,
+ err_code,
+ aws_error_name(err_code));
+
+ s_shutdown_due_to_write_err(websocket, err_code);
+ }
+}
+
+static int s_handler_process_write_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message) {
+
+ (void)slot;
+ struct aws_websocket *websocket = handler->impl;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+
+ /* For each aws_io_message headed in the write direction, send a BINARY frame,
+ * where the frame's payload is the data from this aws_io_message. */
+ struct aws_websocket_send_frame_options options = {
+ .payload_length = message->message_data.len,
+ .user_data = message,
+ .stream_outgoing_payload = s_midchannel_send_payload,
+ .on_complete = s_midchannel_send_complete,
+ .opcode = AWS_WEBSOCKET_OPCODE_BINARY,
+ .fin = true,
+ };
+
+ /* Use copy_mark to track progress as the data is streamed out */
+ message->copy_mark = 0;
+
+ int err = s_send_frame(websocket, &options, false);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Callback for writing data from downstream aws_io_messages into payload of BINARY frames headed upstream */
+static bool s_midchannel_send_payload(struct aws_websocket *websocket, struct aws_byte_buf *out_buf, void *user_data) {
+ (void)websocket;
+ struct aws_io_message *io_msg = user_data;
+
+ /* copy_mark is used to track progress */
+ size_t src_available = io_msg->message_data.len - io_msg->copy_mark;
+ size_t dst_available = out_buf->capacity - out_buf->len;
+ size_t sending = dst_available < src_available ? dst_available : src_available;
+
+ bool success = aws_byte_buf_write(out_buf, io_msg->message_data.buffer + io_msg->copy_mark, sending);
+
+ io_msg->copy_mark += sending;
+ return success;
+}
+
+/* Callback when data from downstream aws_io_messages, finishes being sent as a BINARY frame upstream. */
+static void s_midchannel_send_complete(struct aws_websocket *websocket, int error_code, void *user_data) {
+ (void)websocket;
+ struct aws_io_message *io_msg = user_data;
+
+ if (io_msg->on_completion) {
+ io_msg->on_completion(io_msg->owning_channel, io_msg, error_code, io_msg->user_data);
+ }
+
+ aws_mem_release(io_msg->allocator, io_msg);
+}
+
/* Complete and free an outgoing frame: fire its on_complete callback (if set)
 * with error_code, then release the frame's memory. The frame must already be
 * detached from any list. */
static void s_destroy_outgoing_frame(struct aws_websocket *websocket, struct outgoing_frame *frame, int error_code) {
    AWS_LOGF_TRACE(
        AWS_LS_HTTP_WEBSOCKET,
        "id=%p: Completed outgoing frame=%p opcode=%" PRIu8 "(%s) payload-length=%" PRIu64 " with error_code %d (%s).",
        (void *)websocket,
        (void *)frame,
        frame->def.opcode,
        aws_websocket_opcode_str(frame->def.opcode),
        frame->def.payload_length,
        error_code,
        aws_error_name(error_code));

    if (frame->def.on_complete) {
        frame->def.on_complete(websocket, error_code, frame->def.user_data);
    }

    aws_mem_release(websocket->alloc, frame);
}
+
+static void s_complete_frame_list(struct aws_websocket *websocket, struct aws_linked_list *frames, int error_code) {
+ struct aws_linked_list_node *node = aws_linked_list_begin(frames);
+ while (node != aws_linked_list_end(frames)) {
+ struct outgoing_frame *frame = AWS_CONTAINER_OF(node, struct outgoing_frame, node);
+
+ node = aws_linked_list_next(node);
+ s_destroy_outgoing_frame(websocket, frame, error_code);
+ }
+
+ /* we've released everything, so reset the list to empty */
+ aws_linked_list_init(frames);
+}
+
/* Permanently stop the write side. Future s_send_frame() calls will fail with
 * send_frame_error_code (must be a real error, not SUCCESS). Idempotent: the
 * first caller's error code wins. Event-loop thread only. */
static void s_stop_writing(struct aws_websocket *websocket, int send_frame_error_code) {
    AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
    AWS_ASSERT(send_frame_error_code != AWS_ERROR_SUCCESS);

    if (websocket->thread_data.is_writing_stopped) {
        return;
    }

    AWS_LOGF_TRACE(
        AWS_LS_HTTP_WEBSOCKET,
        "id=%p: Websocket will send no more data, future attempts to send will get error %d (%s).",
        (void *)websocket,
        send_frame_error_code,
        aws_error_name(send_frame_error_code));

    /* Publish the error code so s_send_frame() rejects frames from any thread */
    /* BEGIN CRITICAL SECTION */
    s_lock_synced_data(websocket);

    websocket->synced_data.send_frame_error_code = send_frame_error_code;

    s_unlock_synced_data(websocket);
    /* END CRITICAL SECTION */

    websocket->thread_data.is_writing_stopped = true;
}
+
/* React to a failure on the write path: stop further sends, fail the
 * in-progress frame with the specific error, and either finish the shutdown
 * sequence (if already waiting on the CLOSE frame) or start channel shutdown.
 * Event-loop thread only; safe to call redundantly. */
static void s_shutdown_due_to_write_err(struct aws_websocket *websocket, int error_code) {
    AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));

    /* No more writing allowed (it's ok to call this redundantly). */
    s_stop_writing(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED);

    /* If there's a current outgoing frame, complete it with the specific error code.
     * Any other pending frames will complete with the generic CONNECTION_CLOSED error. */
    if (websocket->thread_data.current_outgoing_frame) {
        s_destroy_outgoing_frame(websocket, websocket->thread_data.current_outgoing_frame, error_code);
        websocket->thread_data.current_outgoing_frame = NULL;
    }

    /* If we're in the final stages of shutdown, ensure shutdown completes.
     * Otherwise tell the channel to shutdown (it's ok to shutdown the channel redundantly). */
    if (websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written) {
        s_finish_shutdown(websocket);
    } else {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_WEBSOCKET,
            "id=%p: Closing websocket due to failure during write, error %d (%s).",
            (void *)websocket,
            error_code,
            aws_error_name(error_code));
        s_schedule_channel_shutdown(websocket, error_code);
    }
}
+
/* React to a failure on the read path: stop processing incoming data, fail the
 * in-progress incoming frame, and schedule channel shutdown.
 * Event-loop thread only; safe to call redundantly. */
static void s_shutdown_due_to_read_err(struct aws_websocket *websocket, int error_code) {
    AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));

    AWS_LOGF_ERROR(
        AWS_LS_HTTP_WEBSOCKET,
        "id=%p: Closing websocket due to failure during read, error %d (%s).",
        (void *)websocket,
        error_code,
        aws_error_name(error_code));

    websocket->thread_data.is_reading_stopped = true;

    /* If there's a current incoming frame, complete it with the specific error code. */
    if (websocket->thread_data.current_incoming_frame) {
        s_complete_incoming_frame(websocket, error_code, NULL);
    }

    /* Tell channel to shutdown (it's ok to call this redundantly) */
    s_schedule_channel_shutdown(websocket, error_code);
}
+
+static void s_shutdown_channel_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ struct aws_websocket *websocket = arg;
+ int error_code;
+
+ /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(websocket);
+
+ error_code = websocket->synced_data.shutdown_channel_task_error_code;
+
+ s_unlock_synced_data(websocket);
+ /* END CRITICAL SECTION */
+
+ aws_channel_shutdown(websocket->channel_slot->channel, error_code);
+}
+
/* Tell the channel to shut down. It is safe to call this multiple times.
 * The call to aws_channel_shutdown() is delayed so that a user invoking aws_websocket_close doesn't
 * have completion callbacks firing before the function call even returns.
 * Only the first caller's error_code is used; later calls are no-ops. */
static void s_schedule_channel_shutdown(struct aws_websocket *websocket, int error_code) {
    bool schedule_shutdown = false;

    /* BEGIN CRITICAL SECTION */
    s_lock_synced_data(websocket);

    /* Schedule the task at most once; record the error code it should use */
    if (!websocket->synced_data.is_shutdown_channel_task_scheduled) {
        schedule_shutdown = true;
        websocket->synced_data.is_shutdown_channel_task_scheduled = true;
        websocket->synced_data.shutdown_channel_task_error_code = error_code;
    }

    s_unlock_synced_data(websocket);
    /* END CRITICAL SECTION */

    /* Schedule outside the lock to avoid holding it across channel calls */
    if (schedule_shutdown) {
        aws_channel_schedule_task_now(websocket->channel_slot->channel, &websocket->shutdown_channel_task);
    }
}
+
/* Public close: begin shutting down the websocket's channel. Thread-safe.
 * Ignored (with an error log) once the websocket has converted to a
 * midchannel handler — the channel's owner drives shutdown in that mode.
 * free_scarce_resources_immediately skips the graceful CLOSE-frame handshake. */
void aws_websocket_close(struct aws_websocket *websocket, bool free_scarce_resources_immediately) {
    bool is_midchannel_handler;

    /* BEGIN CRITICAL SECTION */
    s_lock_synced_data(websocket);
    is_midchannel_handler = websocket->synced_data.is_midchannel_handler;
    s_unlock_synced_data(websocket);
    /* END CRITICAL SECTION */

    if (is_midchannel_handler) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_WEBSOCKET,
            "id=%p: Ignoring close call, websocket has converted to midchannel handler.",
            (void *)websocket);
        return;
    }

    /* TODO: aws_channel_shutdown() should let users specify error_code and "immediate" as separate parameters.
     * Currently, any non-zero error_code results in "immediate" shutdown */
    int error_code = AWS_ERROR_SUCCESS;
    if (free_scarce_resources_immediately) {
        error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
    }

    s_schedule_channel_shutdown(websocket, error_code);
}
+
/* Channel-handler vtable shutdown. Read-direction shutdown completes
 * immediately. Write-direction shutdown tries to send a CLOSE frame first
 * (RFC-6455 closing handshake) and defers completion until that frame is
 * written, times out (AWS_WEBSOCKET_CLOSE_TIMEOUT), or fails — unless
 * free_scarce_resources_immediately or writing is already stopped, in which
 * case shutdown finishes right away. */
static int s_handler_shutdown(
    struct aws_channel_handler *handler,
    struct aws_channel_slot *slot,
    enum aws_channel_direction dir,
    int error_code,
    bool free_scarce_resources_immediately) {

    AWS_ASSERT(aws_channel_thread_is_callers_thread(slot->channel));
    struct aws_websocket *websocket = handler->impl;
    int err;

    AWS_LOGF_DEBUG(
        AWS_LS_HTTP_WEBSOCKET,
        "id=%p: Websocket handler shutting down dir=%s error_code=%d immediate=%d.",
        (void *)websocket,
        dir == AWS_CHANNEL_DIR_READ ? "READ" : "WRITE",
        error_code,
        free_scarce_resources_immediately);

    if (dir == AWS_CHANNEL_DIR_READ) {
        /* Shutdown in the read direction is immediate and simple. */
        websocket->thread_data.is_reading_stopped = true;
        aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately);

    } else {
        /* Record the parameters; s_finish_shutdown() replays them when the
         * write-direction shutdown finally completes. */
        websocket->thread_data.channel_shutdown_error_code = error_code;
        websocket->thread_data.channel_shutdown_free_scarce_resources_immediately = free_scarce_resources_immediately;
        websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written = true;

        if (websocket->thread_data.channel_shutdown_free_scarce_resources_immediately ||
            websocket->thread_data.is_writing_stopped) {

            AWS_LOGF_TRACE(
                AWS_LS_HTTP_WEBSOCKET,
                "id=%p: Finishing handler shutdown immediately, without ensuring a CLOSE frame was sent.",
                (void *)websocket);

            s_stop_writing(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED);
            s_finish_shutdown(websocket);
        } else {
            /* Attempt to queue a CLOSE frame, then wait for it to send before finishing shutdown. */
            struct aws_websocket_send_frame_options close_frame = {
                .opcode = AWS_WEBSOCKET_OPCODE_CLOSE,
                .fin = true,
            };
            err = s_send_frame(websocket, &close_frame, false);
            if (err) {
                AWS_LOGF_WARN(
                    AWS_LS_HTTP_WEBSOCKET,
                    "id=%p: Failed to send CLOSE frame, error %d (%s).",
                    (void *)websocket,
                    aws_last_error(),
                    aws_error_name(aws_last_error()));

                s_stop_writing(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED);
                s_finish_shutdown(websocket);
            } else {
                AWS_LOGF_TRACE(
                    AWS_LS_HTTP_WEBSOCKET,
                    "id=%p: Outgoing CLOSE frame queued, handler will finish shutdown once it's sent.",
                    (void *)websocket);
                /* schedule a task to run after 1 sec. If the CLOSE still not sent at that time, we should just cancel
                 * sending it and shutdown the channel. */
                uint64_t schedule_time = 0;
                aws_channel_current_clock_time(websocket->channel_slot->channel, &schedule_time);
                schedule_time += AWS_WEBSOCKET_CLOSE_TIMEOUT;
                AWS_LOGF_TRACE(
                    AWS_LS_HTTP_WEBSOCKET,
                    "id=%p: websocket_close_timeout task will be run at timestamp %" PRIu64,
                    (void *)websocket,
                    schedule_time);
                aws_channel_schedule_task_future(
                    websocket->channel_slot->channel, &websocket->close_timeout_task, schedule_time);
            }
        }
    }

    return AWS_OP_SUCCESS;
}
+
/* Timeout task scheduled by s_handler_shutdown(): if the CLOSE frame still
 * hasn't been written by now, give up on it and finish shutdown anyway. */
static void s_close_timeout_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
    (void)task;
    if (status != AWS_TASK_STATUS_RUN_READY) {
        /* Channel already shut down; nothing left to time out */
        return;
    }

    struct aws_websocket *websocket = arg;
    AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));

    if (!websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written) {
        /* Not waiting for write to complete, which means the CLOSE frame has sent, just do nothing */
        return;
    }

    AWS_LOGF_WARN(
        AWS_LS_HTTP_WEBSOCKET,
        "id=%p: Failed to send CLOSE frame, timeout happened, shutdown the channel",
        (void *)websocket);

    s_stop_writing(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED);
    s_finish_shutdown(websocket);
}
+
/* Complete the deferred write-direction shutdown: cancel all incomplete
 * incoming/outgoing frames with CONNECTION_CLOSED, drain the cross-thread
 * queue, then notify the channel that this handler's shutdown is done using
 * the error code recorded in s_handler_shutdown(). Event-loop thread only;
 * writing must already be stopped. */
static void s_finish_shutdown(struct aws_websocket *websocket) {
    AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
    AWS_ASSERT(websocket->thread_data.is_writing_stopped);
    AWS_ASSERT(websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written);

    AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Finishing websocket handler shutdown.", (void *)websocket);

    websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written = false;

    /* Cancel all incomplete frames */
    if (websocket->thread_data.current_incoming_frame) {
        s_complete_incoming_frame(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED, NULL);
    }

    if (websocket->thread_data.current_outgoing_frame) {
        s_destroy_outgoing_frame(
            websocket, websocket->thread_data.current_outgoing_frame, AWS_ERROR_HTTP_CONNECTION_CLOSED);
        websocket->thread_data.current_outgoing_frame = NULL;
    }

    /* BEGIN CRITICAL SECTION */
    s_lock_synced_data(websocket);

    while (!aws_linked_list_empty(&websocket->synced_data.outgoing_frame_list)) {
        /* Move frames from synced_data to thread_data, then cancel them together outside critical section */
        struct aws_linked_list_node *node = aws_linked_list_pop_front(&websocket->synced_data.outgoing_frame_list);
        aws_linked_list_push_back(&websocket->thread_data.outgoing_frame_list, node);
    }

    s_unlock_synced_data(websocket);
    /* END CRITICAL SECTION */

    /* Frames already handed to the socket layer complete first, then the never-sent ones */
    s_complete_frame_list(websocket, &websocket->thread_data.write_completion_frames, AWS_ERROR_HTTP_CONNECTION_CLOSED);

    while (!aws_linked_list_empty(&websocket->thread_data.outgoing_frame_list)) {
        struct aws_linked_list_node *node = aws_linked_list_pop_front(&websocket->thread_data.outgoing_frame_list);
        struct outgoing_frame *frame = AWS_CONTAINER_OF(node, struct outgoing_frame, node);
        s_destroy_outgoing_frame(websocket, frame, AWS_ERROR_HTTP_CONNECTION_CLOSED);
    }

    aws_channel_slot_on_handler_shutdown_complete(
        websocket->channel_slot,
        AWS_CHANNEL_DIR_WRITE,
        websocket->thread_data.channel_shutdown_error_code,
        websocket->thread_data.channel_shutdown_free_scarce_resources_immediately);
}
+
+static int s_handler_process_read_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message) {
+
+ AWS_ASSERT(message);
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(slot->channel));
+ struct aws_websocket *websocket = handler->impl;
+ struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&message->message_data);
+ int err;
+
+ /* At the end of this function we'll bump the window back up by this amount.
+ * We start off assuming we'll re-open the window by the whole amount,
+ * but this number will go down if we process any payload data that ought to shrink the window */
+ websocket->thread_data.incoming_message_window_update = message->message_data.len;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Begin processing incoming message of size %zu.",
+ (void *)websocket,
+ message->message_data.len);
+
+ while (cursor.len) {
+ if (websocket->thread_data.is_reading_stopped) {
+ goto clean_up;
+ }
+
+ bool frame_complete;
+ err = aws_websocket_decoder_process(&websocket->thread_data.decoder, &cursor, &frame_complete);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Failed processing incoming message, error %d (%s). Closing connection.",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ if (frame_complete) {
+ bool callback_result;
+ s_complete_incoming_frame(websocket, AWS_ERROR_SUCCESS, &callback_result);
+ if (!callback_result) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Incoming frame completion callback has reported a failure. Closing connection",
+ (void *)websocket);
+
+ aws_raise_error(AWS_ERROR_HTTP_CALLBACK_FAILURE);
+ goto error;
+ }
+ }
+ }
+
+ if (websocket->thread_data.incoming_message_window_update > 0) {
+ err = aws_channel_slot_increment_read_window(slot, websocket->thread_data.incoming_message_window_update);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Failed to increment read window after message processing, error %d (%s). Closing "
+ "connection.",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+ }
+
+ goto clean_up;
+
+error:
+ s_shutdown_due_to_read_err(websocket, aws_last_error());
+
+clean_up:
+ if (cursor.len > 0) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Done processing incoming message, final %zu bytes ignored.",
+ (void *)websocket,
+ cursor.len);
+ } else {
+ AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Done processing incoming message.", (void *)websocket);
+ }
+ aws_mem_release(message->allocator, message);
+ return AWS_OP_SUCCESS;
+}
+
+static int s_decoder_on_frame(const struct aws_websocket_frame *frame, void *user_data) {
+ struct aws_websocket *websocket = user_data;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+ AWS_ASSERT(!websocket->thread_data.current_incoming_frame);
+ AWS_ASSERT(!websocket->thread_data.is_reading_stopped);
+
+ websocket->thread_data.current_incoming_frame = &websocket->thread_data.incoming_frame_storage;
+
+ websocket->thread_data.current_incoming_frame->payload_length = frame->payload_length;
+ websocket->thread_data.current_incoming_frame->opcode = frame->opcode;
+ websocket->thread_data.current_incoming_frame->fin = frame->fin;
+
+ /* If CONTINUATION frames are expected, remember which type of data is being continued.
+ * RFC-6455 Section 5.4 Fragmentation */
+ if (aws_websocket_is_data_frame(frame->opcode)) {
+ if (frame->opcode != AWS_WEBSOCKET_OPCODE_CONTINUATION) {
+ if (frame->fin) {
+ websocket->thread_data.continuation_of_opcode = 0;
+ } else {
+ websocket->thread_data.continuation_of_opcode = frame->opcode;
+ }
+ }
+ } else if (frame->opcode == AWS_WEBSOCKET_OPCODE_PING) {
+ /* Prepare to store payload of PING so we can echo it back in the PONG */
+ aws_byte_buf_reset(&websocket->thread_data.incoming_ping_payload, false /*zero_contents*/);
+ /* Note: we are NOT calling aws_byte_buf_reserve().
+ * This works around an attack where a malicious peer CLAIMS they'll send a huge frame,
+         * which would cause OOM if we did the reserve immediately.
+ * If a malicious peer wants to run us out of memory, they'll need to do
+ * it the costly way and actually send a billion bytes.
+ * Or we could impose our own internal limits, but for now this is simpler */
+ }
+
+ /* Invoke user cb */
+ bool callback_result = true;
+ if (websocket->on_incoming_frame_begin && !websocket->thread_data.is_midchannel_handler) {
+ callback_result = websocket->on_incoming_frame_begin(
+ websocket, websocket->thread_data.current_incoming_frame, websocket->user_data);
+ }
+
+ if (!callback_result) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Incoming frame callback has reported a failure.", (void *)websocket);
+ return aws_raise_error(AWS_ERROR_HTTP_CALLBACK_FAILURE);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_decoder_on_payload(struct aws_byte_cursor data, void *user_data) {
+ struct aws_websocket *websocket = user_data;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+ AWS_ASSERT(websocket->thread_data.current_incoming_frame);
+ AWS_ASSERT(!websocket->thread_data.is_reading_stopped);
+
+ /* Store payload of PING so we can echo it back in the PONG */
+ if (websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_PING) {
+ aws_byte_buf_append_dynamic(&websocket->thread_data.incoming_ping_payload, &data);
+ }
+
+ if (websocket->thread_data.is_midchannel_handler) {
+ return s_decoder_on_midchannel_payload(websocket, data);
+ }
+
+ return s_decoder_on_user_payload(websocket, data);
+}
+
+/* Invoke user cb */
+static int s_decoder_on_user_payload(struct aws_websocket *websocket, struct aws_byte_cursor data) {
+ if (websocket->on_incoming_frame_payload) {
+ if (!websocket->on_incoming_frame_payload(
+ websocket, websocket->thread_data.current_incoming_frame, data, websocket->user_data)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Incoming payload callback has reported a failure.", (void *)websocket);
+ return aws_raise_error(AWS_ERROR_HTTP_CALLBACK_FAILURE);
+ }
+ }
+
+ /* If this is a "data" frame's payload, let the window shrink */
+ if (aws_websocket_is_data_frame(websocket->thread_data.current_incoming_frame->opcode) &&
+ websocket->manual_window_update) {
+
+ websocket->thread_data.incoming_message_window_update -= data.len;
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: The read window is shrinking by %zu due to incoming payload from 'data' frame.",
+ (void *)websocket,
+ data.len);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Pass data to channel handler on the right */
+static int s_decoder_on_midchannel_payload(struct aws_websocket *websocket, struct aws_byte_cursor data) {
+ struct aws_io_message *io_msg = NULL;
+
+ /* Only pass data to next handler if it's from a BINARY frame (or the CONTINUATION of a BINARY frame) */
+ bool is_binary_data = websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_BINARY ||
+ (websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_CONTINUATION &&
+ websocket->thread_data.continuation_of_opcode == AWS_WEBSOCKET_OPCODE_BINARY);
+ if (!is_binary_data) {
+ return AWS_OP_SUCCESS;
+ }
+
+ AWS_ASSERT(websocket->channel_slot->adj_right); /* Expected another slot in the read direction */
+
+ /* Note that current implementation of websocket handler does not buffer data travelling in the "read" direction,
+ * so the downstream read window needs to be large enough to immediately receive incoming data. */
+ if (aws_channel_slot_downstream_read_window(websocket->channel_slot) < data.len) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Cannot send entire message without exceeding read window.",
+ (void *)websocket);
+ aws_raise_error(AWS_IO_CHANNEL_READ_WOULD_EXCEED_WINDOW);
+ goto error;
+ }
+
+ io_msg = aws_channel_acquire_message_from_pool(
+ websocket->channel_slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, data.len);
+ if (!io_msg) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to acquire message.", (void *)websocket);
+ goto error;
+ }
+
+ if (io_msg->message_data.capacity < data.len) {
+        /* Probably can't happen. Data is coming from an aws_io_message, should be able to acquire another just as big */
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to acquire sufficiently large message.", (void *)websocket);
+ aws_raise_error(AWS_ERROR_UNKNOWN);
+ goto error;
+ }
+
+ if (!aws_byte_buf_write_from_whole_cursor(&io_msg->message_data, data)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET, "id=%p: Unexpected error while copying data.", (void *)websocket);
+ aws_raise_error(AWS_ERROR_UNKNOWN);
+ goto error;
+ }
+
+ int err = aws_channel_slot_send_message(websocket->channel_slot, io_msg, AWS_CHANNEL_DIR_READ);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Failed to send read message, error %d (%s).",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Reduce amount by which websocket will update its read window */
+ AWS_ASSERT(websocket->thread_data.incoming_message_window_update >= data.len);
+ websocket->thread_data.incoming_message_window_update -= data.len;
+
+ return AWS_OP_SUCCESS;
+
+error:
+ if (io_msg) {
+ aws_mem_release(io_msg->allocator, io_msg);
+ }
+ return AWS_OP_ERR;
+}
+
+/* When the websocket sends a frame automatically (PONG, CLOSE),
+ * this holds the payload. */
+struct aws_websocket_autopayload {
+ struct aws_allocator *alloc;
+ struct aws_byte_buf buf;
+ struct aws_byte_cursor advancing_cursor;
+};
+
+static struct aws_websocket_autopayload *s_autopayload_new(
+ struct aws_allocator *alloc,
+ const struct aws_byte_buf *src) {
+
+ struct aws_websocket_autopayload *autopayload = aws_mem_calloc(alloc, 1, sizeof(struct aws_websocket_autopayload));
+ autopayload->alloc = alloc;
+ if (src->len > 0) {
+ aws_byte_buf_init_copy(&autopayload->buf, alloc, src);
+ autopayload->advancing_cursor = aws_byte_cursor_from_buf(&autopayload->buf);
+ }
+
+ return autopayload;
+}
+
+static void s_autopayload_destroy(struct aws_websocket_autopayload *autopayload) {
+ aws_byte_buf_clean_up(&autopayload->buf);
+ aws_mem_release(autopayload->alloc, autopayload);
+}
+
+static void s_autopayload_send_complete(struct aws_websocket *websocket, int error_code, void *user_data) {
+ (void)websocket;
+ (void)error_code;
+
+ struct aws_websocket_autopayload *autopayload = user_data;
+ s_autopayload_destroy(autopayload);
+}
+
+static bool s_autopayload_stream_outgoing_payload(
+ struct aws_websocket *websocket,
+ struct aws_byte_buf *out_buf,
+ void *user_data) {
+
+ (void)websocket;
+ struct aws_websocket_autopayload *autopayload = user_data;
+ aws_byte_buf_write_to_capacity(out_buf, &autopayload->advancing_cursor);
+ return true;
+}
+
+static void s_complete_incoming_frame(struct aws_websocket *websocket, int error_code, bool *out_callback_result) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+ AWS_ASSERT(websocket->thread_data.current_incoming_frame);
+
+ if (error_code == 0) {
+ /* If this was a CLOSE frame, don't read any more data. */
+ if (websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_CLOSE) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Close frame received, any further data received will be ignored.",
+ (void *)websocket);
+ websocket->thread_data.is_reading_stopped = true;
+
+ /* TODO: auto-close if there's a channel-handler to the right */
+
+ } else if (websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_PING) {
+ /* Automatically respond to a PING with a PONG */
+ if (!websocket->thread_data.is_writing_stopped) {
+ /* Optimization idea: avoid allocations/copies each time we send an auto-PONG.
+ * Maybe have a small autopayload pool, instead of allocating one each time.
+ * Maybe encode directly to aws_io_message, instead of copying to a buf, that's copied to a msg later.
+ * Maybe "std::move()" the aws_byte_bufs around instead of copying them. */
+ struct aws_websocket_autopayload *autopong =
+ s_autopayload_new(websocket->alloc, &websocket->thread_data.incoming_ping_payload);
+
+ struct aws_websocket_send_frame_options pong_frame = {
+ .opcode = AWS_WEBSOCKET_OPCODE_PONG,
+ .fin = true,
+ .payload_length = autopong->buf.len,
+ .stream_outgoing_payload = s_autopayload_stream_outgoing_payload,
+ .on_complete = s_autopayload_send_complete,
+ .user_data = autopong,
+ };
+
+ int send_err = s_send_frame(websocket, &pong_frame, false /*from_public_api*/);
+ /* Failure should be impossible. We already checked that writing is not stopped */
+ AWS_FATAL_ASSERT(!send_err && "Unexpected failure sending websocket PONG");
+ }
+ }
+ }
+
+ /* Invoke user cb */
+ bool callback_result = true;
+ if (websocket->on_incoming_frame_complete && !websocket->thread_data.is_midchannel_handler) {
+ callback_result = websocket->on_incoming_frame_complete(
+ websocket, websocket->thread_data.current_incoming_frame, error_code, websocket->user_data);
+ }
+
+ if (out_callback_result) {
+ *out_callback_result = callback_result;
+ }
+
+ websocket->thread_data.current_incoming_frame = NULL;
+}
+
+static size_t s_handler_initial_window_size(struct aws_channel_handler *handler) {
+ struct aws_websocket *websocket = handler->impl;
+ return websocket->initial_window_size;
+}
+
+static size_t s_handler_message_overhead(struct aws_channel_handler *handler) {
+ (void)handler;
+ return AWS_WEBSOCKET_MAX_FRAME_OVERHEAD;
+}
+
+static int s_handler_increment_read_window(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ size_t size) {
+
+ struct aws_websocket *websocket = handler->impl;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(slot->channel));
+ AWS_ASSERT(websocket->thread_data.is_midchannel_handler);
+
+ /* NOTE: This is pretty hacky and should change if it ever causes issues.
+ *
+ * Currently, all read messages are processed the moment they're received.
+ * If the downstream read window is open enough to accept this data, we can send it right along.
+ * BUT if the downstream window were too small, we'd need to buffer the data and wait until
+ * the downstream window opened again to finish sending.
+ *
+ * To avoid that complexity, we go to pains here to ensure that the websocket's window exactly
+ * matches the window to the right, allowing us to avoid buffering in the read direction.
+ */
+ size_t increment = size;
+ if (websocket->thread_data.last_known_right_slot != slot->adj_right) {
+ if (size < slot->window_size) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: The websocket does not support downstream handlers with a smaller window.",
+ (void *)websocket);
+ aws_raise_error(AWS_IO_CHANNEL_READ_WOULD_EXCEED_WINDOW);
+ goto error;
+ }
+
+ /* New handler to the right, make sure websocket's window matches its window. */
+ websocket->thread_data.last_known_right_slot = slot->adj_right;
+ increment = size - slot->window_size;
+ }
+
+ if (increment != 0) {
+ int err = aws_channel_slot_increment_read_window(slot, increment);
+ if (err) {
+ goto error;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ websocket->thread_data.is_reading_stopped = true;
+ /* Shutting down channel because I know that no one ever checks these errors */
+ s_shutdown_due_to_read_err(websocket, aws_last_error());
+ return AWS_OP_ERR;
+}
+
+static void s_increment_read_window_action(struct aws_websocket *websocket, size_t size) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+
+ int err = aws_channel_slot_increment_read_window(websocket->channel_slot, size);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Failed to increment read window, error %d (%s). Closing websocket.",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ s_schedule_channel_shutdown(websocket, aws_last_error());
+ }
+}
+
+static void s_increment_read_window_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ struct aws_websocket *websocket = arg;
+ size_t size;
+
+ /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(websocket);
+
+ size = websocket->synced_data.window_increment_size;
+ websocket->synced_data.window_increment_size = 0;
+
+ s_unlock_synced_data(websocket);
+ /* END CRITICAL SECTION */
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Running task to increment read window by %zu.", (void *)websocket, size);
+
+ s_increment_read_window_action(websocket, size);
+}
+
+void aws_websocket_increment_read_window(struct aws_websocket *websocket, size_t size) {
+ if (size == 0) {
+ AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Ignoring window increment of size 0.", (void *)websocket);
+ return;
+ }
+
+ if (!websocket->manual_window_update) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Ignoring window increment. Manual window management (aka read backpressure) is not enabled.",
+ (void *)websocket);
+ return;
+ }
+
+ /* Schedule a task to do the increment.
+ * If task is already scheduled, just increase size to be incremented */
+ bool is_midchannel_handler = false;
+ bool should_schedule_task = false;
+
+ /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(websocket);
+
+ if (websocket->synced_data.is_midchannel_handler) {
+ is_midchannel_handler = true;
+ } else if (websocket->synced_data.window_increment_size == 0) {
+ should_schedule_task = true;
+ websocket->synced_data.window_increment_size = size;
+ } else {
+ websocket->synced_data.window_increment_size =
+ aws_add_size_saturating(websocket->synced_data.window_increment_size, size);
+ }
+
+ s_unlock_synced_data(websocket);
+ /* END CRITICAL SECTION */
+
+ if (is_midchannel_handler) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Ignoring window increment call, websocket has converted to midchannel handler.",
+ (void *)websocket);
+ } else if (should_schedule_task) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Scheduling task to increment read window by %zu.", (void *)websocket, size);
+ aws_channel_schedule_task_now(websocket->channel_slot->channel, &websocket->increment_read_window_task);
+ } else {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Task to increment read window already scheduled, increasing scheduled size by %zu.",
+ (void *)websocket,
+ size);
+ }
+}
+
+int aws_websocket_random_handshake_key(struct aws_byte_buf *dst) {
+ /* RFC-6455 Section 4.1.
+ * Derive random 16-byte value, base64-encoded, for the Sec-WebSocket-Key header */
+ uint8_t key_random_storage[16] = {0};
+ struct aws_byte_buf key_random_buf = aws_byte_buf_from_empty_array(key_random_storage, sizeof(key_random_storage));
+ int err = aws_device_random_buffer(&key_random_buf);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor key_random_cur = aws_byte_cursor_from_buf(&key_random_buf);
+ err = aws_base64_encode(&key_random_cur, dst);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_http_message *aws_http_message_new_websocket_handshake_request(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor path,
+ struct aws_byte_cursor host) {
+
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&path));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&host));
+
+ struct aws_http_message *request = aws_http_message_new_request(allocator);
+ if (!request) {
+ goto error;
+ }
+
+ int err = aws_http_message_set_request_method(request, aws_http_method_get);
+ if (err) {
+ goto error;
+ }
+
+ err = aws_http_message_set_request_path(request, path);
+ if (err) {
+ goto error;
+ }
+
+ uint8_t key_storage[AWS_WEBSOCKET_MAX_HANDSHAKE_KEY_LENGTH];
+ struct aws_byte_buf key_buf = aws_byte_buf_from_empty_array(key_storage, sizeof(key_storage));
+ err = aws_websocket_random_handshake_key(&key_buf);
+ if (err) {
+ goto error;
+ }
+
+ struct aws_http_header required_headers[] = {
+ {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"),
+ .value = host,
+ },
+ {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("websocket"),
+ },
+ {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"),
+ },
+ {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Key"),
+ .value = aws_byte_cursor_from_buf(&key_buf),
+ },
+ {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Version"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("13"),
+ },
+ };
+
+ for (size_t i = 0; i < AWS_ARRAY_SIZE(required_headers); ++i) {
+ err = aws_http_message_add_header(request, required_headers[i]);
+ if (err) {
+ goto error;
+ }
+ }
+
+ return request;
+
+error:
+ aws_http_message_destroy(request);
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c b/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
new file mode 100644
index 00000000000..b5225873059
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
@@ -0,0 +1,866 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/cal/hash.h>
+#include <aws/common/encoding.h>
+#include <aws/common/logging.h>
+#include <aws/common/string.h>
+#include <aws/http/connection.h>
+#include <aws/http/private/http_impl.h>
+#include <aws/http/private/strutil.h>
+#include <aws/http/private/websocket_impl.h>
+#include <aws/http/request_response.h>
+#include <aws/http/status_code.h>
+#include <aws/io/uri.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+/**
+ * Allow unit-tests to mock interactions with external systems.
+ */
+static const struct aws_websocket_client_bootstrap_system_vtable s_default_system_vtable = {
+ .aws_http_client_connect = aws_http_client_connect,
+ .aws_http_connection_release = aws_http_connection_release,
+ .aws_http_connection_close = aws_http_connection_close,
+ .aws_http_connection_get_channel = aws_http_connection_get_channel,
+ .aws_http_connection_make_request = aws_http_connection_make_request,
+ .aws_http_stream_activate = aws_http_stream_activate,
+ .aws_http_stream_release = aws_http_stream_release,
+ .aws_http_stream_get_connection = aws_http_stream_get_connection,
+ .aws_http_stream_update_window = aws_http_stream_update_window,
+ .aws_http_stream_get_incoming_response_status = aws_http_stream_get_incoming_response_status,
+ .aws_websocket_handler_new = aws_websocket_handler_new,
+};
+
+static const struct aws_websocket_client_bootstrap_system_vtable *s_system_vtable = &s_default_system_vtable;
+
+void aws_websocket_client_bootstrap_set_system_vtable(
+ const struct aws_websocket_client_bootstrap_system_vtable *system_vtable) {
+
+ s_system_vtable = system_vtable;
+}
+
+/**
+ * The websocket bootstrap brings a websocket connection into this world, and sees it out again.
+ * Spins up an HTTP client, performs the opening handshake (HTTP Upgrade request),
+ * creates the websocket handler, and inserts it into the channel.
+ * The bootstrap is responsible for firing the on_connection_setup and on_connection_shutdown callbacks.
+ */
+struct aws_websocket_client_bootstrap {
+ /* Settings copied in from aws_websocket_client_connection_options */
+ struct aws_allocator *alloc;
+ size_t initial_window_size;
+ bool manual_window_update;
+ void *user_data;
+ /* Setup callback will be set NULL once it's invoked.
+ * This is used to determine whether setup or shutdown should be invoked
+ * from the HTTP-shutdown callback. */
+ aws_websocket_on_connection_setup_fn *websocket_setup_callback;
+ aws_websocket_on_connection_shutdown_fn *websocket_shutdown_callback;
+ aws_websocket_on_incoming_frame_begin_fn *websocket_frame_begin_callback;
+ aws_websocket_on_incoming_frame_payload_fn *websocket_frame_payload_callback;
+ aws_websocket_on_incoming_frame_complete_fn *websocket_frame_complete_callback;
+
+ /* Handshake request data */
+ struct aws_http_message *handshake_request;
+
+ /* Given the "Sec-WebSocket-Key" from the request,
+ * this is what we expect the response's "Sec-WebSocket-Accept" to be */
+ struct aws_byte_buf expected_sec_websocket_accept;
+
+ /* Comma-separated values from the request's "Sec-WebSocket-Protocol" (or NULL if none) */
+ struct aws_string *expected_sec_websocket_protocols;
+
+ /* Handshake response data */
+ int response_status;
+ struct aws_http_headers *response_headers;
+ bool got_full_response_headers;
+ struct aws_byte_buf response_body;
+ bool got_full_response_body;
+
+ int setup_error_code;
+ struct aws_websocket *websocket;
+};
+
+static void s_ws_bootstrap_destroy(struct aws_websocket_client_bootstrap *ws_bootstrap);
+static int s_ws_bootstrap_calculate_sec_websocket_accept(
+ struct aws_byte_cursor sec_websocket_key,
+ struct aws_byte_buf *out_buf,
+ struct aws_allocator *alloc);
+static void s_ws_bootstrap_cancel_setup_due_to_err(
+ struct aws_websocket_client_bootstrap *ws_bootstrap,
+ struct aws_http_connection *http_connection,
+ int error_code);
+static void s_ws_bootstrap_on_http_setup(struct aws_http_connection *http_connection, int error_code, void *user_data);
+static void s_ws_bootstrap_on_http_shutdown(
+ struct aws_http_connection *http_connection,
+ int error_code,
+ void *user_data);
+static int s_ws_bootstrap_on_handshake_response_headers(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers,
+ void *user_data);
+static int s_ws_bootstrap_on_handshake_response_header_block_done(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ void *user_data);
+static int s_ws_bootstrap_on_handshake_response_body(
+ struct aws_http_stream *stream,
+ const struct aws_byte_cursor *data,
+ void *user_data);
+static void s_ws_bootstrap_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data);
+
+int aws_websocket_client_connect(const struct aws_websocket_client_connection_options *options) {
+ aws_http_fatal_assert_library_initialized();
+ AWS_ASSERT(options);
+
+ /* Validate options */
+ struct aws_byte_cursor path;
+ aws_http_message_get_request_path(options->handshake_request, &path);
+ if (!options->allocator || !options->bootstrap || !options->socket_options || !options->host.len || !path.len ||
+ !options->on_connection_setup) {
+
+ AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Missing required websocket connection options.");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct aws_byte_cursor method;
+ aws_http_message_get_request_method(options->handshake_request, &method);
+ if (aws_http_str_to_method(method) != AWS_HTTP_METHOD_GET) {
+
+ AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Websocket request must have method be 'GET'.");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (!options->handshake_request) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=static: Invalid connection options, missing required request for websocket client handshake.");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ const struct aws_http_headers *request_headers = aws_http_message_get_headers(options->handshake_request);
+ struct aws_byte_cursor sec_websocket_key;
+ if (aws_http_headers_get(request_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Key"), &sec_websocket_key)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=static: Websocket handshake request is missing required 'Sec-WebSocket-Key' header");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ /* Extensions are not currently supported */
+ if (aws_http_headers_has(request_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Extensions"))) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: 'Sec-WebSocket-Extensions' are not currently supported");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ /* Create bootstrap */
+ struct aws_websocket_client_bootstrap *ws_bootstrap =
+ aws_mem_calloc(options->allocator, 1, sizeof(struct aws_websocket_client_bootstrap));
+
+ ws_bootstrap->alloc = options->allocator;
+ ws_bootstrap->initial_window_size = options->initial_window_size;
+ ws_bootstrap->manual_window_update = options->manual_window_management;
+ ws_bootstrap->user_data = options->user_data;
+ ws_bootstrap->websocket_setup_callback = options->on_connection_setup;
+ ws_bootstrap->websocket_shutdown_callback = options->on_connection_shutdown;
+ ws_bootstrap->websocket_frame_begin_callback = options->on_incoming_frame_begin;
+ ws_bootstrap->websocket_frame_payload_callback = options->on_incoming_frame_payload;
+ ws_bootstrap->websocket_frame_complete_callback = options->on_incoming_frame_complete;
+ ws_bootstrap->handshake_request = aws_http_message_acquire(options->handshake_request);
+ ws_bootstrap->response_status = AWS_HTTP_STATUS_CODE_UNKNOWN;
+ ws_bootstrap->response_headers = aws_http_headers_new(ws_bootstrap->alloc);
+ aws_byte_buf_init(&ws_bootstrap->response_body, ws_bootstrap->alloc, 0);
+
+ if (s_ws_bootstrap_calculate_sec_websocket_accept(
+ sec_websocket_key, &ws_bootstrap->expected_sec_websocket_accept, ws_bootstrap->alloc)) {
+ goto error;
+ }
+
+ ws_bootstrap->expected_sec_websocket_protocols =
+ aws_http_headers_get_all(request_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Protocol"));
+
+ /* Initiate HTTP connection */
+ struct aws_http_client_connection_options http_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT;
+ http_options.allocator = ws_bootstrap->alloc;
+ http_options.bootstrap = options->bootstrap;
+ http_options.host_name = options->host;
+ http_options.socket_options = options->socket_options;
+ http_options.tls_options = options->tls_options;
+ http_options.proxy_options = options->proxy_options;
+
+ if (options->manual_window_management) {
+ http_options.manual_window_management = true;
+
+ /* Give HTTP handler enough window to comfortably receive the handshake response.
+ *
+ * If the upgrade is unsuccessful, the HTTP window will shrink as the response body is received.
+ * In this case, we'll keep incrementing the window back to its original size so data keeps arriving.
+ *
+ * If the upgrade is successful, then the websocket handler is installed, and
+ * the HTTP handler will take over its own window management. */
+ http_options.initial_window_size = 1024;
+ }
+
+ http_options.user_data = ws_bootstrap;
+ http_options.on_setup = s_ws_bootstrap_on_http_setup;
+ http_options.on_shutdown = s_ws_bootstrap_on_http_shutdown;
+ http_options.requested_event_loop = options->requested_event_loop;
+
+ /* Infer port, if not explicitly specified in URI */
+ http_options.port = options->port;
+ if (!http_options.port) {
+ http_options.port = options->tls_options ? 443 : 80;
+ }
+
+ if (s_system_vtable->aws_http_client_connect(&http_options)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=static: Websocket failed to initiate HTTP connection, error %d (%s)",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Success! (so far) */
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Websocket setup begun, connecting to " PRInSTR ":%" PRIu16 PRInSTR,
+ (void *)ws_bootstrap,
+ AWS_BYTE_CURSOR_PRI(options->host),
+ options->port,
+ AWS_BYTE_CURSOR_PRI(path));
+
+ return AWS_OP_SUCCESS;
+
+error:
+ s_ws_bootstrap_destroy(ws_bootstrap);
+ return AWS_OP_ERR;
+}
+
+/* Free the bootstrap and every resource it owns (request, headers, buffers, strings).
+ * Safe to call with NULL. Does NOT release the websocket itself; that is the user's job. */
+static void s_ws_bootstrap_destroy(struct aws_websocket_client_bootstrap *ws_bootstrap) {
+    if (!ws_bootstrap) {
+        return;
+    }
+
+    aws_http_message_release(ws_bootstrap->handshake_request);
+    aws_http_headers_release(ws_bootstrap->response_headers);
+    aws_byte_buf_clean_up(&ws_bootstrap->expected_sec_websocket_accept);
+    aws_string_destroy(ws_bootstrap->expected_sec_websocket_protocols);
+    aws_byte_buf_clean_up(&ws_bootstrap->response_body);
+
+    aws_mem_release(ws_bootstrap->alloc, ws_bootstrap);
+}
+
+/* Given the handshake request's "Sec-WebSocket-Key" value,
+ * calculate the expected value for the response's "Sec-WebSocket-Accept".
+ * RFC-6455 Section 4.1:
+ * base64-encoded SHA-1 of the concatenation of the |Sec-WebSocket-Key|
+ * (as a string, not base64-decoded) with the string
+ * "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" but ignoring any leading and
+ * trailing whitespace
+ */
+static int s_ws_bootstrap_calculate_sec_websocket_accept(
+    struct aws_byte_cursor sec_websocket_key,
+    struct aws_byte_buf *out_buf,
+    struct aws_allocator *alloc) {
+
+    /* On success, out_buf is initialized from alloc and ownership passes to the caller. */
+    AWS_ASSERT(out_buf && !out_buf->allocator && out_buf->len == 0); /* expect buf to be uninitialized */
+
+    /* note: leading and trailing whitespace was already trimmed by aws_http_headers */
+
+    /* optimization: skip concatenating Sec-WebSocket-Key and the magic string.
+     * just run the SHA1 over the first string, and then the 2nd. */
+
+    bool success = false;
+    struct aws_byte_cursor magic_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
+
+    /* SHA-1 */
+    struct aws_hash *sha1 = aws_sha1_new(alloc);
+    if (!sha1) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=static: Failed to initiate SHA1, error %d (%s)",
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+        goto cleanup;
+    }
+
+    if (aws_hash_update(sha1, &sec_websocket_key) || aws_hash_update(sha1, &magic_string)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=static: Failed to update SHA1, error %d (%s)",
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+        goto cleanup;
+    }
+
+    /* Finalize SHA-1 digest into a fixed-size stack buffer. */
+    uint8_t sha1_storage[AWS_SHA1_LEN];
+    struct aws_byte_buf sha1_buf = aws_byte_buf_from_empty_array(sha1_storage, sizeof(sha1_storage));
+    if (aws_hash_finalize(sha1, &sha1_buf, 0)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=static: Failed to finalize SHA1, error %d (%s)",
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+        goto cleanup;
+    }
+
+    /* base64-encoded SHA-1 (clear out_buf, and write to it again) */
+    size_t base64_encode_sha1_len;
+    if (aws_base64_compute_encoded_len(sha1_buf.len, &base64_encode_sha1_len)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=static: Failed to determine Base64-encoded length, error %d (%s)",
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+        goto cleanup;
+    }
+    /* NOTE(review): return value of aws_byte_buf_init is not checked here —
+     * presumably allocation failure aborts via the allocator; confirm. */
+    aws_byte_buf_init(out_buf, alloc, base64_encode_sha1_len);
+
+    struct aws_byte_cursor sha1_cursor = aws_byte_cursor_from_buf(&sha1_buf);
+    if (aws_base64_encode(&sha1_cursor, out_buf)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=static: Failed to Base64-encode, error %d (%s)",
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+        goto cleanup;
+    }
+
+    success = true;
+cleanup:
+    if (sha1) {
+        aws_hash_destroy(sha1);
+    }
+    return success ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
+
+/* Called if something goes wrong after an HTTP connection is established.
+ * The HTTP connection is closed.
+ * We must wait for its shutdown to complete before informing user of the failed websocket setup. */
+static void s_ws_bootstrap_cancel_setup_due_to_err(
+    struct aws_websocket_client_bootstrap *ws_bootstrap,
+    struct aws_http_connection *http_connection,
+    int error_code) {
+
+    AWS_ASSERT(error_code);
+    AWS_ASSERT(http_connection);
+
+    /* Only the FIRST error is recorded; later calls while shutdown is already
+     * pending are no-ops so the original cause is what gets reported to the user. */
+    if (!ws_bootstrap->setup_error_code) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=%p: Canceling websocket setup due to error %d (%s).",
+            (void *)ws_bootstrap,
+            error_code,
+            aws_error_name(error_code));
+
+        ws_bootstrap->setup_error_code = error_code;
+
+        /* Closing the connection eventually fires s_ws_bootstrap_on_http_shutdown,
+         * which reports the failure to the user. */
+        s_system_vtable->aws_http_connection_close(http_connection);
+    }
+}
+
+/* Invoke the user's on_connection_setup callback exactly once, reporting either
+ * the established websocket or a non-zero error, plus whatever parts of the
+ * handshake response (status, headers, body) were fully received. */
+static void s_ws_bootstrap_invoke_setup_callback(struct aws_websocket_client_bootstrap *ws_bootstrap, int error_code) {
+
+    /* sanity check: websocket XOR error_code is set. both cannot be set. both cannot be unset */
+    AWS_FATAL_ASSERT((error_code != 0) ^ (ws_bootstrap->websocket != NULL));
+
+    /* Report things about the response, if we received them */
+    int *response_status_ptr = NULL;
+    struct aws_http_header *response_header_array = NULL;
+    size_t num_response_headers = 0;
+    struct aws_byte_cursor *response_body_ptr = NULL;
+    struct aws_byte_cursor response_body_cursor = {.len = 0};
+
+    if (ws_bootstrap->got_full_response_headers) {
+        response_status_ptr = &ws_bootstrap->response_status;
+
+        num_response_headers = aws_http_headers_count(ws_bootstrap->response_headers);
+
+        /* calloc at least 1 entry so the array pointer is valid even when there are 0 headers */
+        response_header_array =
+            aws_mem_calloc(ws_bootstrap->alloc, aws_max_size(1, num_response_headers), sizeof(struct aws_http_header));
+
+        for (size_t i = 0; i < num_response_headers; ++i) {
+            aws_http_headers_get_index(ws_bootstrap->response_headers, i, &response_header_array[i]);
+        }
+
+        if (ws_bootstrap->got_full_response_body) {
+            response_body_cursor = aws_byte_cursor_from_buf(&ws_bootstrap->response_body);
+            response_body_ptr = &response_body_cursor;
+        }
+    }
+
+    struct aws_websocket_on_connection_setup_data setup_data = {
+        .error_code = error_code,
+        .websocket = ws_bootstrap->websocket,
+        .handshake_response_status = response_status_ptr,
+        .handshake_response_header_array = response_header_array,
+        .num_handshake_response_headers = num_response_headers,
+        .handshake_response_body = response_body_ptr,
+    };
+
+    ws_bootstrap->websocket_setup_callback(&setup_data, ws_bootstrap->user_data);
+
+    /* Clear setup callback so that we know that it's been invoked. */
+    ws_bootstrap->websocket_setup_callback = NULL;
+
+    if (response_header_array) {
+        aws_mem_release(ws_bootstrap->alloc, response_header_array);
+    }
+}
+
+/* Invoked when HTTP connection has been established (or failed to be established) */
+static void s_ws_bootstrap_on_http_setup(struct aws_http_connection *http_connection, int error_code, void *user_data) {
+
+    struct aws_websocket_client_bootstrap *ws_bootstrap = user_data;
+
+    /* Setup callback contract is: if error_code is non-zero then connection is NULL. */
+    AWS_FATAL_ASSERT((error_code != 0) == (http_connection == NULL));
+
+    /* If http connection failed, inform the user immediately and clean up the websocket bootstrapper. */
+    if (error_code) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=%p: Websocket setup failed to establish HTTP connection, error %d (%s).",
+            (void *)ws_bootstrap,
+            error_code,
+            aws_error_name(error_code));
+
+        s_ws_bootstrap_invoke_setup_callback(ws_bootstrap, error_code);
+
+        s_ws_bootstrap_destroy(ws_bootstrap);
+        return;
+    }
+
+    /* Connection exists!
+     * Note that if anything goes wrong with websocket setup from hereon out, we must close the http connection
+     * first and wait for shutdown to complete before informing the user of setup failure. */
+
+    /* Send the handshake request */
+    struct aws_http_make_request_options options = {
+        .self_size = sizeof(options),
+        .request = ws_bootstrap->handshake_request,
+        .user_data = ws_bootstrap,
+        .on_response_headers = s_ws_bootstrap_on_handshake_response_headers,
+        .on_response_header_block_done = s_ws_bootstrap_on_handshake_response_header_block_done,
+        .on_response_body = s_ws_bootstrap_on_handshake_response_body,
+        .on_complete = s_ws_bootstrap_on_stream_complete,
+    };
+
+    struct aws_http_stream *handshake_stream =
+        s_system_vtable->aws_http_connection_make_request(http_connection, &options);
+
+    if (!handshake_stream) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=%p: Failed to make websocket upgrade request, error %d (%s).",
+            (void *)ws_bootstrap,
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+        goto error;
+    }
+
+    if (s_system_vtable->aws_http_stream_activate(handshake_stream)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=%p: Failed to activate websocket upgrade request, error %d (%s).",
+            (void *)ws_bootstrap,
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+        goto error;
+    }
+
+    /* Success! (so far) */
+    AWS_LOGF_TRACE(
+        AWS_LS_HTTP_WEBSOCKET_SETUP,
+        "id=%p: HTTP connection established, sending websocket upgrade request.",
+        (void *)ws_bootstrap);
+    return;
+
+error:
+    /* NOTE(review): handshake_stream is NULL here when make_request failed;
+     * this relies on aws_http_stream_release being NULL-safe — confirm. */
+    s_system_vtable->aws_http_stream_release(handshake_stream);
+    s_ws_bootstrap_cancel_setup_due_to_err(ws_bootstrap, http_connection, aws_last_error());
+}
+
+/* Invoked when the HTTP connection has shut down.
+ * This is never called if the HTTP connection failed its setup */
+static void s_ws_bootstrap_on_http_shutdown(
+    struct aws_http_connection *http_connection,
+    int error_code,
+    void *user_data) {
+
+    struct aws_websocket_client_bootstrap *ws_bootstrap = user_data;
+
+    /* Inform user that connection has completely shut down.
+     * If setup callback still hasn't fired, invoke it now and indicate failure.
+     * Otherwise, invoke shutdown callback. */
+    if (ws_bootstrap->websocket_setup_callback) {
+        AWS_ASSERT(!ws_bootstrap->websocket);
+
+        /* If there's already a setup_error_code, use that */
+        if (ws_bootstrap->setup_error_code) {
+            error_code = ws_bootstrap->setup_error_code;
+        }
+
+        /* Ensure non-zero error_code is passed */
+        if (!error_code) {
+            error_code = AWS_ERROR_UNKNOWN;
+        }
+
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=%p: Websocket setup failed, error %d (%s).",
+            (void *)ws_bootstrap,
+            error_code,
+            aws_error_name(error_code));
+
+        s_ws_bootstrap_invoke_setup_callback(ws_bootstrap, error_code);
+
+    } else if (ws_bootstrap->websocket_shutdown_callback) {
+        AWS_ASSERT(ws_bootstrap->websocket);
+
+        AWS_LOGF_DEBUG(
+            AWS_LS_HTTP_WEBSOCKET,
+            "id=%p: Websocket client connection shut down with error %d (%s).",
+            (void *)ws_bootstrap->websocket,
+            error_code,
+            aws_error_name(error_code));
+
+        ws_bootstrap->websocket_shutdown_callback(ws_bootstrap->websocket, error_code, ws_bootstrap->user_data);
+    }
+
+    /* Clean up HTTP connection and websocket-bootstrap.
+     * It's still up to the user to release the websocket itself. */
+    s_system_vtable->aws_http_connection_release(http_connection);
+
+    s_ws_bootstrap_destroy(ws_bootstrap);
+}
+
+/* Invoked repeatedly as handshake response headers arrive */
+static int s_ws_bootstrap_on_handshake_response_headers(
+    struct aws_http_stream *stream,
+    enum aws_http_header_block header_block,
+    const struct aws_http_header *header_array,
+    size_t num_headers,
+    void *user_data) {
+
+    (void)stream;
+    (void)header_block;
+
+    struct aws_websocket_client_bootstrap *ws_bootstrap = user_data;
+
+    /* Deep-copy headers into ws_bootstrap */
+    /* NOTE(review): return value of aws_http_headers_add_array is ignored —
+     * an allocation failure here would be silently dropped; confirm intended. */
+    aws_http_headers_add_array(ws_bootstrap->response_headers, header_array, num_headers);
+
+    /* Don't report a partially-received response
+     * (flag is set true again when the header block completes). */
+    ws_bootstrap->got_full_response_headers = false;
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Check that the named response header exists and matches expected_value
+ * (optionally case-sensitive). Raises AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE
+ * and returns AWS_OP_ERR if missing or mismatched. */
+static int s_ws_bootstrap_validate_header(
+    struct aws_websocket_client_bootstrap *ws_bootstrap,
+    const char *name,
+    struct aws_byte_cursor expected_value,
+    bool case_sensitive) {
+
+    struct aws_byte_cursor actual_value;
+    if (aws_http_headers_get(ws_bootstrap->response_headers, aws_byte_cursor_from_c_str(name), &actual_value)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Response lacks required '%s' header", (void *)ws_bootstrap, name);
+        return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE);
+    }
+
+    bool matches = case_sensitive ? aws_byte_cursor_eq(&expected_value, &actual_value)
+                                  : aws_byte_cursor_eq_ignore_case(&expected_value, &actual_value);
+    if (!matches) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=%p: Response '%s' header has wrong value. Expected '" PRInSTR "'. Received '" PRInSTR "'",
+            (void *)ws_bootstrap,
+            name,
+            AWS_BYTE_CURSOR_PRI(expected_value),
+            AWS_BYTE_CURSOR_PRI(actual_value));
+        return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* RFC-6455 Section 4.1 step 6: verify the server's Sec-WebSocket-Protocol
+ * selection is one of the protocols the client originally offered. */
+static int s_ws_bootstrap_validate_sec_websocket_protocol(const struct aws_websocket_client_bootstrap *ws_bootstrap) {
+    /* First handle the easy case:
+     * If client requested no protocols, then the response should not pick any */
+    if (ws_bootstrap->expected_sec_websocket_protocols == NULL) {
+        if (aws_http_headers_has(
+                ws_bootstrap->response_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Protocol"))) {
+
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_WEBSOCKET_SETUP,
+                "id=%p: Response has 'Sec-WebSocket-Protocol' header, no protocol was requested",
+                (void *)ws_bootstrap);
+            return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE);
+        } else {
+            return AWS_OP_SUCCESS;
+        }
+    }
+
+    /* Check that server has picked one of the protocols listed in the request */
+    struct aws_byte_cursor response_protocol;
+    if (aws_http_headers_get(
+            ws_bootstrap->response_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Protocol"), &response_protocol)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=%p: Response lacks required 'Sec-WebSocket-Protocol' header",
+            (void *)ws_bootstrap);
+        return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE);
+    }
+
+    /* Split the client's comma-separated request list; each entry is compared
+     * exactly (case-sensitive) after trimming HTTP whitespace. */
+    struct aws_byte_cursor request_protocols =
+        aws_byte_cursor_from_string(ws_bootstrap->expected_sec_websocket_protocols);
+    struct aws_byte_cursor request_protocol_i;
+    AWS_ZERO_STRUCT(request_protocol_i);
+    while (aws_byte_cursor_next_split(&request_protocols, ',', &request_protocol_i)) {
+        struct aws_byte_cursor request_protocol = aws_strutil_trim_http_whitespace(request_protocol_i);
+        if (aws_byte_cursor_eq(&response_protocol, &request_protocol)) {
+            /* Success! */
+            AWS_LOGF_DEBUG(
+                AWS_LS_HTTP_WEBSOCKET_SETUP,
+                "id=%p: Server selected Sec-WebSocket-Protocol: " PRInSTR,
+                (void *)ws_bootstrap,
+                AWS_BYTE_CURSOR_PRI(response_protocol));
+            return AWS_OP_SUCCESS;
+        }
+    }
+
+    AWS_LOGF_ERROR(
+        AWS_LS_HTTP_WEBSOCKET_SETUP,
+        "id=%p: Response 'Sec-WebSocket-Protocol' header has wrong value. Received '" PRInSTR
+        "'. Expected one of '" PRInSTR "'",
+        (void *)ws_bootstrap,
+        AWS_BYTE_CURSOR_PRI(response_protocol),
+        AWS_BYTE_CURSOR_PRI(request_protocols));
+    return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE);
+}
+
+/* OK, we've got all the headers for the 101 Switching Protocols response.
+ * Validate the handshake response, install the websocket handler into the channel,
+ * and invoke the on_connection_setup callback. */
+static int s_ws_bootstrap_validate_response_and_install_websocket_handler(
+    struct aws_websocket_client_bootstrap *ws_bootstrap,
+    struct aws_http_connection *http_connection) {
+
+    /* RFC-6455 Section 4.1 - The client MUST validate the server's response as follows... */
+
+    /* (we already checked step 1, that status code is 101) */
+    AWS_FATAL_ASSERT(ws_bootstrap->response_status == AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS);
+
+    /* 2. If the response lacks an |Upgrade| header field or the |Upgrade|
+     * header field contains a value that is not an ASCII case-
+     * insensitive match for the value "websocket", the client MUST
+     * _Fail the WebSocket Connection_. */
+    if (s_ws_bootstrap_validate_header(
+            ws_bootstrap, "Upgrade", aws_byte_cursor_from_c_str("websocket"), false /*case_sensitive*/)) {
+        goto error;
+    }
+
+    /* 3. If the response lacks a |Connection| header field or the
+     * |Connection| header field doesn't contain a token that is an
+     * ASCII case-insensitive match for the value "Upgrade", the client
+     * MUST _Fail the WebSocket Connection_. */
+    if (s_ws_bootstrap_validate_header(
+            ws_bootstrap, "Connection", aws_byte_cursor_from_c_str("Upgrade"), false /*case_sensitive*/)) {
+        goto error;
+    }
+
+    /* 4. If the response lacks a |Sec-WebSocket-Accept| header field or
+     * the |Sec-WebSocket-Accept| contains a value other than the
+     * base64-encoded SHA-1 of the concatenation of the |Sec-WebSocket-
+     * Key| (as a string, not base64-decoded) with the string "258EAFA5-
+     * E914-47DA-95CA-C5AB0DC85B11" but ignoring any leading and
+     * trailing whitespace, the client MUST _Fail the WebSocket
+     * Connection_. */
+    if (s_ws_bootstrap_validate_header(
+            ws_bootstrap,
+            "Sec-WebSocket-Accept",
+            aws_byte_cursor_from_buf(&ws_bootstrap->expected_sec_websocket_accept),
+            true /*case_sensitive*/)) {
+        goto error;
+    }
+
+    /* (step 5 is about validating Sec-WebSocket-Extensions, but we don't support extensions) */
+    if (aws_http_headers_has(ws_bootstrap->response_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Extensions"))) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=%p: Response has 'Sec-WebSocket-Extensions' header, but client does not support extensions.",
+            (void *)ws_bootstrap);
+        aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE);
+        goto error;
+    }
+
+    /* 6. If the response includes a |Sec-WebSocket-Protocol| header field
+     * and this header field indicates the use of a subprotocol that was
+     * not present in the client's handshake (the server has indicated a
+     * subprotocol not requested by the client), the client MUST _Fail
+     * the WebSocket Connection_. */
+    if (s_ws_bootstrap_validate_sec_websocket_protocol(ws_bootstrap)) {
+        goto error;
+    }
+
+    /* Insert websocket handler into channel */
+    struct aws_channel *channel = s_system_vtable->aws_http_connection_get_channel(http_connection);
+    AWS_ASSERT(channel);
+
+    struct aws_websocket_handler_options ws_options = {
+        .allocator = ws_bootstrap->alloc,
+        .channel = channel,
+        .initial_window_size = ws_bootstrap->initial_window_size,
+        .user_data = ws_bootstrap->user_data,
+        .on_incoming_frame_begin = ws_bootstrap->websocket_frame_begin_callback,
+        .on_incoming_frame_payload = ws_bootstrap->websocket_frame_payload_callback,
+        .on_incoming_frame_complete = ws_bootstrap->websocket_frame_complete_callback,
+        .is_server = false,
+        .manual_window_update = ws_bootstrap->manual_window_update,
+    };
+
+    ws_bootstrap->websocket = s_system_vtable->aws_websocket_handler_new(&ws_options);
+    if (!ws_bootstrap->websocket) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET_SETUP,
+            "id=%p: Failed to create websocket handler, error %d (%s)",
+            (void *)ws_bootstrap,
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+
+        goto error;
+    }
+
+    /* Success! Setup complete! */
+    AWS_LOGF_TRACE(/* Log for tracing setup id to websocket id. */
+                   AWS_LS_HTTP_WEBSOCKET_SETUP,
+                   "id=%p: Setup success, created websocket=%p",
+                   (void *)ws_bootstrap,
+                   (void *)ws_bootstrap->websocket);
+
+    AWS_LOGF_DEBUG(/* Debug log about creation of websocket. */
+                   AWS_LS_HTTP_WEBSOCKET,
+                   "id=%p: Websocket client connection established.",
+                   (void *)ws_bootstrap->websocket);
+
+    s_ws_bootstrap_invoke_setup_callback(ws_bootstrap, 0 /*error_code*/);
+    return AWS_OP_SUCCESS;
+
+error:
+    /* Closing the connection here; the final failure is reported to the user
+     * from the connection-shutdown callback. */
+    s_ws_bootstrap_cancel_setup_due_to_err(ws_bootstrap, http_connection, aws_last_error());
+    /* Returning error stops HTTP from processing any further data */
+    return AWS_OP_ERR;
+}
+
+/**
+ * Invoked each time we reach the end of a block of response headers.
+ * If we got a valid 101 Switching Protocols response, we insert the websocket handler.
+ * Note:
+ * In HTTP, 1xx responses are "interim" responses. So a 101 Switching Protocols
+ * response does not "complete" the stream. Once the connection has switched
+ * protocols, the stream does not end until the whole connection is closed.
+ */
+static int s_ws_bootstrap_on_handshake_response_header_block_done(
+    struct aws_http_stream *stream,
+    enum aws_http_header_block header_block,
+    void *user_data) {
+
+    struct aws_websocket_client_bootstrap *ws_bootstrap = user_data;
+    struct aws_http_connection *http_connection = s_system_vtable->aws_http_stream_get_connection(stream);
+    AWS_ASSERT(http_connection);
+
+    /* Get status code from stream */
+    /* NOTE(review): return value of aws_http_stream_get_incoming_response_status
+     * is ignored — presumably it cannot fail once a header block is done; confirm. */
+    s_system_vtable->aws_http_stream_get_incoming_response_status(stream, &ws_bootstrap->response_status);
+
+    ws_bootstrap->got_full_response_headers = true;
+
+    if (header_block == AWS_HTTP_HEADER_BLOCK_INFORMATIONAL) {
+        if (ws_bootstrap->response_status == AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS) {
+            /* OK, got 101 response, proceed with upgrade! */
+            return s_ws_bootstrap_validate_response_and_install_websocket_handler(ws_bootstrap, http_connection);
+
+        } else {
+            /* It would be weird to get any other kind of 1xx response, but anything is possible.
+             * Another response should come eventually. Just ignore the headers from this one... */
+            AWS_LOGF_DEBUG(
+                AWS_LS_HTTP_WEBSOCKET_SETUP,
+                "id=%p: Server sent interim response with status code %d",
+                (void *)ws_bootstrap,
+                ws_bootstrap->response_status);
+
+            aws_http_headers_clear(ws_bootstrap->response_headers);
+            ws_bootstrap->got_full_response_headers = false;
+            return AWS_OP_SUCCESS;
+        }
+    }
+
+    /* Otherwise, we got normal headers (from a non-1xx response), or trailing headers.
+     * This can only happen if the handshake did not succeed. Keep the connection going.
+     * We'll report failed setup to the user after we've received the complete response */
+    ws_bootstrap->setup_error_code = AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE;
+    return AWS_OP_SUCCESS;
+}
+
+/**
+ * Invoked as we receive the body of a failed response.
+ * This is never invoked if the handshake succeeds.
+ */
+static int s_ws_bootstrap_on_handshake_response_body(
+    struct aws_http_stream *stream,
+    const struct aws_byte_cursor *data,
+    void *user_data) {
+
+    struct aws_websocket_client_bootstrap *ws_bootstrap = user_data;
+
+    /* Accumulate the failed response's body so it can be reported to the user.
+     * NOTE(review): return value of aws_byte_buf_append_dynamic is ignored —
+     * an OOM here would silently truncate the reported body; confirm intended. */
+    aws_byte_buf_append_dynamic(&ws_bootstrap->response_body, data);
+
+    /* If we're managing the read window...
+     * bump the HTTP window back to its starting size, so that we keep receiving the whole response. */
+    if (ws_bootstrap->manual_window_update) {
+        s_system_vtable->aws_http_stream_update_window(stream, data->len);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/**
+ * Invoked when the stream completes.
+ *
+ * If the handshake succeeded and the websocket was installed,
+ * then this is invoked at the end of the websocket connection.
+ *
+ * If the handshake response was not 101, then this is invoked
+ * after we've received the whole response.
+ *
+ * Or this is invoked because the connection failed unexpectedly before the handshake could complete,
+ * (or we killed the connection because the 101 response didn't pass validation).
+ */
+static void s_ws_bootstrap_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) {
+    struct aws_websocket_client_bootstrap *ws_bootstrap = user_data;
+    struct aws_http_connection *http_connection = s_system_vtable->aws_http_stream_get_connection(stream);
+
+    /* Only report the body if we received a complete response */
+    if (error_code == 0) {
+        ws_bootstrap->got_full_response_body = true;
+    }
+
+    /* Make sure the connection closes.
+     * We'll deal with finishing setup or shutdown from the http-shutdown callback */
+    s_system_vtable->aws_http_connection_close(http_connection);
+
+    /* Done with stream, let it be cleaned up */
+    s_system_vtable->aws_http_stream_release(stream);
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c b/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
new file mode 100644
index 00000000000..bcaa3c6912c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
@@ -0,0 +1,387 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/websocket_decoder.h>
+
+#include <aws/common/encoding.h>
+
+#include <inttypes.h>
+
+typedef int(state_fn)(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data);
+
+/* STATE_INIT: Resets things, consumes no data */
+static int s_state_init(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+    (void)data;
+    /* Zero the per-frame scratch and move on; never consumes input, never fails. */
+    AWS_ZERO_STRUCT(decoder->current_frame);
+    decoder->state = AWS_WEBSOCKET_DECODER_STATE_OPCODE_BYTE;
+    return AWS_OP_SUCCESS;
+}
+
+/* STATE_OPCODE_BYTE: Decode first byte of frame, which has all kinds of goodies in it. */
+static int s_state_opcode_byte(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+    /* Consumes exactly one byte; returns without changing state if no data is available yet. */
+    if (data->len == 0) {
+        return AWS_OP_SUCCESS;
+    }
+
+    uint8_t byte = data->ptr[0];
+    aws_byte_cursor_advance(data, 1);
+
+    /* first 4 bits are all bools */
+    decoder->current_frame.fin = byte & 0x80;
+    decoder->current_frame.rsv[0] = byte & 0x40;
+    decoder->current_frame.rsv[1] = byte & 0x20;
+    decoder->current_frame.rsv[2] = byte & 0x10;
+
+    /* next 4 bits are opcode */
+    decoder->current_frame.opcode = byte & 0x0F;
+
+    /* RFC-6455 Section 5.2 - Opcode
+     * If an unknown opcode is received, the receiving endpoint MUST _Fail the WebSocket Connection_. */
+    switch (decoder->current_frame.opcode) {
+        case AWS_WEBSOCKET_OPCODE_CONTINUATION:
+        case AWS_WEBSOCKET_OPCODE_TEXT:
+        case AWS_WEBSOCKET_OPCODE_BINARY:
+        case AWS_WEBSOCKET_OPCODE_CLOSE:
+        case AWS_WEBSOCKET_OPCODE_PING:
+        case AWS_WEBSOCKET_OPCODE_PONG:
+            break;
+        default:
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_WEBSOCKET,
+                "id=%p: Received frame with unknown opcode 0x%" PRIx8,
+                (void *)decoder->user_data,
+                decoder->current_frame.opcode);
+            return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR);
+    }
+
+    /* RFC-6455 Section 5.2 Fragmentation
+     *
+     * Data frames with the FIN bit clear are considered fragmented and must be followed by
+     * 1+ CONTINUATION frames, where only the final CONTINUATION frame's FIN bit is set.
+     *
+     * Control frames may be injected in the middle of a fragmented message,
+     * but control frames may not be fragmented themselves.
+     */
+    if (aws_websocket_is_data_frame(decoder->current_frame.opcode)) {
+        bool is_continuation_frame = AWS_WEBSOCKET_OPCODE_CONTINUATION == decoder->current_frame.opcode;
+
+        /* A CONTINUATION frame is legal if-and-only-if the previous data frame left FIN clear. */
+        if (decoder->expecting_continuation_data_frame != is_continuation_frame) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_WEBSOCKET,
+                "id=%p: Fragmentation error. Received start of new message before end of previous message",
+                (void *)decoder->user_data);
+            return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR);
+        }
+
+        decoder->expecting_continuation_data_frame = !decoder->current_frame.fin;
+
+    } else {
+        /* Control frames themselves MUST NOT be fragmented. */
+        if (!decoder->current_frame.fin) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_WEBSOCKET,
+                "id=%p: Received fragmented control frame. This is illegal",
+                (void *)decoder->user_data);
+            return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR);
+        }
+    }
+
+    if (decoder->current_frame.opcode == AWS_WEBSOCKET_OPCODE_TEXT) {
+        decoder->processing_text_message = true;
+    }
+
+    decoder->state = AWS_WEBSOCKET_DECODER_STATE_LENGTH_BYTE;
+    return AWS_OP_SUCCESS;
+}
+
+/* STATE_LENGTH_BYTE: Decode byte containing length, determine if we need to decode extended length. */
+static int s_state_length_byte(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+    /* Consumes exactly one byte; returns without changing state if no data is available yet. */
+    if (data->len == 0) {
+        return AWS_OP_SUCCESS;
+    }
+
+    uint8_t byte = data->ptr[0];
+    aws_byte_cursor_advance(data, 1);
+
+    /* first bit is a bool */
+    decoder->current_frame.masked = byte & 0x80;
+
+    /* remaining 7 bits are payload length */
+    decoder->current_frame.payload_length = byte & 0x7F;
+
+    if (decoder->current_frame.payload_length >= AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH) {
+        /* If 7bit payload length has a high value, then the next few bytes contain the real payload length */
+        decoder->state_bytes_processed = 0;
+        decoder->state = AWS_WEBSOCKET_DECODER_STATE_EXTENDED_LENGTH;
+    } else {
+        /* If 7bit payload length has low value, that's the actual payload size, jump past EXTENDED_LENGTH state */
+        decoder->state = AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY_CHECK;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* STATE_EXTENDED_LENGTH: Decode extended length (state skipped if no extended length). */
+static int s_state_extended_length(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+    /* May be invoked repeatedly; extended-length bytes can arrive split across calls
+     * and are accumulated in state_cache until all 2 or 8 bytes are present. */
+    if (data->len == 0) {
+        return AWS_OP_SUCCESS;
+    }
+
+    /* The 7bit payload value loaded during the previous state indicated that
+     * actual payload length is encoded across the next 2 or 8 bytes. */
+    uint8_t total_bytes_extended_length;
+    uint64_t min_acceptable_value;
+    uint64_t max_acceptable_value;
+    if (decoder->current_frame.payload_length == AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH) {
+        total_bytes_extended_length = 2;
+        min_acceptable_value = AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MIN_VALUE;
+        max_acceptable_value = AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MAX_VALUE;
+    } else {
+        AWS_ASSERT(decoder->current_frame.payload_length == AWS_WEBSOCKET_7BIT_VALUE_FOR_8BYTE_EXTENDED_LENGTH);
+
+        total_bytes_extended_length = 8;
+        min_acceptable_value = AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MIN_VALUE;
+        max_acceptable_value = AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE;
+    }
+
+    /* Copy bytes of extended-length to state_cache, we'll process them later.*/
+    AWS_ASSERT(total_bytes_extended_length > decoder->state_bytes_processed);
+
+    size_t remaining_bytes = (size_t)(total_bytes_extended_length - decoder->state_bytes_processed);
+    size_t bytes_to_consume = remaining_bytes <= data->len ? remaining_bytes : data->len;
+
+    AWS_ASSERT(bytes_to_consume + decoder->state_bytes_processed <= sizeof(decoder->state_cache));
+
+    memcpy(decoder->state_cache + decoder->state_bytes_processed, data->ptr, bytes_to_consume);
+
+    aws_byte_cursor_advance(data, bytes_to_consume);
+    decoder->state_bytes_processed += bytes_to_consume;
+
+    /* Return, still waiting on more bytes */
+    if (decoder->state_bytes_processed < total_bytes_extended_length) {
+        return AWS_OP_SUCCESS;
+    }
+
+    /* All bytes have been copied into state_cache, now read them together as one number,
+     * transforming from network byte order (big endian) to native endianness. */
+    struct aws_byte_cursor cache_cursor = aws_byte_cursor_from_array(decoder->state_cache, total_bytes_extended_length);
+    if (total_bytes_extended_length == 2) {
+        uint16_t val;
+        aws_byte_cursor_read_be16(&cache_cursor, &val);
+        decoder->current_frame.payload_length = val;
+    } else {
+        aws_byte_cursor_read_be64(&cache_cursor, &decoder->current_frame.payload_length);
+    }
+
+    /* RFC-6455 Section 5.2: the minimal number of bytes MUST be used to encode the
+     * length, so values below the tier's minimum (or above its maximum) are illegal. */
+    if (decoder->current_frame.payload_length < min_acceptable_value ||
+        decoder->current_frame.payload_length > max_acceptable_value) {
+
+        AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to decode payload length", (void *)decoder->user_data);
+        return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR);
+    }
+
+    decoder->state = AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY_CHECK;
+    return AWS_OP_SUCCESS;
+}
+
+/* MASKING_KEY_CHECK: Determine if we need to decode masking-key. Consumes no data. */
+static int s_state_masking_key_check(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+    (void)data;
+
+    /* If mask bit was set, move to next state to process 4 bytes of masking key.
+     * Otherwise skip next step, there is no masking key. */
+    if (decoder->current_frame.masked) {
+        decoder->state = AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY;
+        decoder->state_bytes_processed = 0;
+    } else {
+        decoder->state = AWS_WEBSOCKET_DECODER_STATE_PAYLOAD_CHECK;
+    }
+
+    /* Pure state transition; never consumes input, never fails. */
+    return AWS_OP_SUCCESS;
+}
+
+/* MASKING_KEY: Decode masking-key (state skipped if no masking key). */
+static int s_state_masking_key(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+    /* May be invoked repeatedly; the 4 masking-key bytes can arrive split
+     * across calls and are accumulated into current_frame.masking_key. */
+    if (data->len == 0) {
+        return AWS_OP_SUCCESS;
+    }
+
+    AWS_ASSERT(4 > decoder->state_bytes_processed);
+    size_t bytes_remaining = 4 - (size_t)decoder->state_bytes_processed;
+    size_t bytes_to_consume = bytes_remaining < data->len ? bytes_remaining : data->len;
+
+    memcpy(decoder->current_frame.masking_key + decoder->state_bytes_processed, data->ptr, bytes_to_consume);
+
+    aws_byte_cursor_advance(data, bytes_to_consume);
+    decoder->state_bytes_processed += bytes_to_consume;
+
+    /* If all bytes consumed, proceed to next state */
+    if (decoder->state_bytes_processed == 4) {
+        decoder->state = AWS_WEBSOCKET_DECODER_STATE_PAYLOAD_CHECK;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* PAYLOAD_CHECK: Determine if we need to decode a payload. Consumes no data. */
+static int s_state_payload_check(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+    (void)data;
+
+    /* Frame header is fully decoded: report it to the user via on_frame() before any payload bytes. */
+    if (decoder->on_frame(&decoder->current_frame, decoder->user_data) != AWS_OP_SUCCESS) {
+        return AWS_OP_ERR;
+    }
+
+    /* Pick the next state: PAYLOAD if there are bytes to consume, otherwise finish the frame. */
+    if (decoder->current_frame.payload_length == 0) {
+        decoder->state = AWS_WEBSOCKET_DECODER_STATE_FRAME_END;
+    } else {
+        decoder->state_bytes_processed = 0;
+        decoder->state = AWS_WEBSOCKET_DECODER_STATE_PAYLOAD;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* PAYLOAD: Decode payload until we're done (state skipped if no payload). */
+static int s_state_payload(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+    if (data->len == 0) {
+        return AWS_OP_SUCCESS;
+    }
+
+    AWS_ASSERT(decoder->current_frame.payload_length > decoder->state_bytes_processed);
+    uint64_t bytes_remaining = decoder->current_frame.payload_length - decoder->state_bytes_processed;
+    size_t bytes_to_consume = bytes_remaining < data->len ? (size_t)bytes_remaining : data->len;
+
+    /* `payload` is a window into the caller's buffer; the unmasking below mutates it in place. */
+    struct aws_byte_cursor payload = aws_byte_cursor_advance(data, bytes_to_consume);
+
+    /* Unmask data, if necessary.
+     * RFC-6455 Section 5.3 Client-to-Server Masking
+     * Each byte of payload is XOR against a byte of the masking-key.
+     * mask_index starts from state_bytes_processed so the 4-byte key stays aligned
+     * even when the payload is delivered across multiple process() calls. */
+    if (decoder->current_frame.masked) {
+        uint64_t mask_index = decoder->state_bytes_processed;
+
+        /* Optimization idea: don't do this 1 byte at a time */
+        uint8_t *current_byte = payload.ptr;
+        uint8_t *end_byte = payload.ptr + payload.len;
+        while (current_byte != end_byte) {
+            *current_byte++ ^= decoder->current_frame.masking_key[mask_index++ % 4];
+        }
+    }
+
+    /* TODO: validate payload of CLOSE frame */
+
+    /* Validate the UTF-8 for TEXT messages (a TEXT frame and any subsequent CONTINUATION frames) */
+    if (decoder->processing_text_message && aws_websocket_is_data_frame(decoder->current_frame.opcode)) {
+        if (aws_utf8_decoder_update(decoder->text_message_validator, payload)) {
+            AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET, "id=%p: Received invalid UTF-8", (void *)decoder->user_data);
+            return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR);
+        }
+    }
+
+    /* Invoke on_payload() callback to inform user of payload data */
+    int err = decoder->on_payload(payload, decoder->user_data);
+    if (err) {
+        return AWS_OP_ERR;
+    }
+
+    decoder->state_bytes_processed += payload.len;
+    AWS_ASSERT(decoder->state_bytes_processed <= decoder->current_frame.payload_length);
+
+    /* If all data consumed, proceed to next state. */
+    if (decoder->state_bytes_processed == decoder->current_frame.payload_length) {
+        decoder->state = AWS_WEBSOCKET_DECODER_STATE_FRAME_END;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* FRAME_END: Perform checks once we reach the end of the frame. */
+static int s_state_frame_end(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+    (void)data;
+
+    /* If we're done processing a text message (a TEXT frame and any subsequent CONTINUATION frames),
+     * complete the UTF-8 validation */
+    if (decoder->processing_text_message && aws_websocket_is_data_frame(decoder->current_frame.opcode) &&
+        decoder->current_frame.fin) {
+
+        /* finalize() failing indicates the message ended mid-way through a multi-byte encoding. */
+        if (aws_utf8_decoder_finalize(decoder->text_message_validator)) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_WEBSOCKET,
+                "id=%p: Received invalid UTF-8 (incomplete encoding)",
+                (void *)decoder->user_data);
+            return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR);
+        }
+
+        decoder->processing_text_message = false;
+    }
+
+    /* Done! */
+    decoder->state = AWS_WEBSOCKET_DECODER_STATE_DONE;
+    return AWS_OP_SUCCESS;
+}
+
+/* Jump table indexed by decoder->state; entry order must match enum aws_websocket_decoder_state. */
+static state_fn *s_state_functions[AWS_WEBSOCKET_DECODER_STATE_DONE] = {
+    s_state_init,
+    s_state_opcode_byte,
+    s_state_length_byte,
+    s_state_extended_length,
+    s_state_masking_key_check,
+    s_state_masking_key,
+    s_state_payload_check,
+    s_state_payload,
+    s_state_frame_end,
+};
+
+/**
+ * Drive the decoder state machine over `data`, consuming bytes in place.
+ * On success, *frame_complete reports whether an entire frame was decoded;
+ * any bytes belonging to a following frame are left in `data` for the caller.
+ */
+int aws_websocket_decoder_process(
+    struct aws_websocket_decoder *decoder,
+    struct aws_byte_cursor *data,
+    bool *frame_complete) {
+
+    /* Run state machine until frame is completely decoded, or the state stops changing.
+     * Note that we don't stop looping when data->len reaches zero, because some states consume no data. */
+    while (decoder->state != AWS_WEBSOCKET_DECODER_STATE_DONE) {
+        enum aws_websocket_decoder_state prev_state = decoder->state;
+
+        int err = s_state_functions[decoder->state](decoder, data);
+        if (err) {
+            return AWS_OP_ERR;
+        }
+
+        if (decoder->state == prev_state) {
+            AWS_ASSERT(data->len == 0); /* If no more work to do, all possible data should have been consumed */
+            break;
+        }
+    }
+
+    if (decoder->state == AWS_WEBSOCKET_DECODER_STATE_DONE) {
+        /* Reset to INIT so this decoder is immediately ready for the next frame. */
+        decoder->state = AWS_WEBSOCKET_DECODER_STATE_INIT;
+        *frame_complete = true;
+        return AWS_OP_SUCCESS;
+    }
+
+    *frame_complete = false;
+    return AWS_OP_SUCCESS;
+}
+
+/**
+ * Initialize a decoder: zero all state, record the user's callbacks and user_data,
+ * and allocate the UTF-8 validator used to check TEXT messages.
+ */
+void aws_websocket_decoder_init(
+    struct aws_websocket_decoder *decoder,
+    struct aws_allocator *alloc,
+    aws_websocket_decoder_frame_fn *on_frame,
+    aws_websocket_decoder_payload_fn *on_payload,
+    void *user_data) {
+
+    AWS_ZERO_STRUCT(*decoder);
+    decoder->on_frame = on_frame;
+    decoder->on_payload = on_payload;
+    decoder->user_data = user_data;
+    decoder->text_message_validator = aws_utf8_decoder_new(alloc, NULL /* default options */);
+}
+
+/* Release decoder-owned resources (the UTF-8 validator) and scrub the struct. */
+void aws_websocket_decoder_clean_up(struct aws_websocket_decoder *decoder) {
+    aws_utf8_decoder_destroy(decoder->text_message_validator);
+    AWS_ZERO_STRUCT(*decoder);
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c b/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
new file mode 100644
index 00000000000..a2fd1989a76
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
@@ -0,0 +1,375 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/websocket_encoder.h>
+
+#include <inttypes.h>
+
+typedef int(state_fn)(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf);
+
+/* STATE_INIT: Outputs no data */
+static int s_state_init(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+    (void)out_buf;
+
+    if (encoder->is_frame_in_progress) {
+        encoder->state = AWS_WEBSOCKET_ENCODER_STATE_OPCODE_BYTE;
+        return AWS_OP_SUCCESS;
+    }
+
+    /* process() was invoked without a frame having been started. */
+    return aws_raise_error(AWS_ERROR_INVALID_STATE);
+}
+
+/* STATE_OPCODE_BYTE: Outputs 1st byte of frame, which is packed with goodies. */
+static int s_state_opcode_byte(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+
+    AWS_ASSERT((encoder->frame.opcode & 0xF0) == 0); /* Should be impossible, the opcode was checked in start_frame() */
+
+    /* Right 4 bits are opcode, left 4 bits are fin|rsv1|rsv2|rsv3 */
+    uint8_t byte = encoder->frame.opcode;
+    byte |= (encoder->frame.fin << 7);
+    byte |= (encoder->frame.rsv[0] << 6);
+    byte |= (encoder->frame.rsv[1] << 5);
+    byte |= (encoder->frame.rsv[2] << 4);
+
+    /* If buffer has room to write, proceed to next state.
+     * Otherwise state is left unchanged so the next process() call retries this byte. */
+    if (aws_byte_buf_write_u8(out_buf, byte)) {
+        encoder->state = AWS_WEBSOCKET_ENCODER_STATE_LENGTH_BYTE;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* STATE_LENGTH_BYTE: Output 2nd byte of frame, which indicates payload length */
+static int s_state_length_byte(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+    /* First bit is masking bool */
+    uint8_t byte = (uint8_t)(encoder->frame.masked << 7);
+
+    /* Next 7bits are length, if length is small.
+     * Otherwise next 7bits are a magic number indicating how many bytes will be required to encode actual length */
+    bool extended_length_required;
+
+    if (encoder->frame.payload_length < AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MIN_VALUE) {
+        /* Small length fits directly in the 7 bits. */
+        byte |= (uint8_t)encoder->frame.payload_length;
+        extended_length_required = false;
+    } else if (encoder->frame.payload_length <= AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MAX_VALUE) {
+        byte |= AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH;
+        extended_length_required = true;
+    } else {
+        /* Upper bound was already enforced by start_frame(). */
+        AWS_ASSERT(encoder->frame.payload_length <= AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE);
+        byte |= AWS_WEBSOCKET_7BIT_VALUE_FOR_8BYTE_EXTENDED_LENGTH;
+        extended_length_required = true;
+    }
+
+    /* If buffer has room to write, proceed to next appropriate state */
+    if (aws_byte_buf_write_u8(out_buf, byte)) {
+        if (extended_length_required) {
+            encoder->state = AWS_WEBSOCKET_ENCODER_STATE_EXTENDED_LENGTH;
+            encoder->state_bytes_processed = 0;
+        } else {
+            encoder->state = AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY_CHECK;
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* STATE_EXTENDED_LENGTH: Output extended length (state skipped if not using extended length). */
+static int s_state_extended_length(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+    /* Fill tmp buffer with extended-length in network byte order */
+    uint8_t network_bytes_array[8] = {0};
+    struct aws_byte_buf network_bytes_buf =
+        aws_byte_buf_from_empty_array(network_bytes_array, sizeof(network_bytes_array));
+    if (encoder->frame.payload_length <= AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MAX_VALUE) {
+        aws_byte_buf_write_be16(&network_bytes_buf, (uint16_t)encoder->frame.payload_length);
+    } else {
+        aws_byte_buf_write_be64(&network_bytes_buf, encoder->frame.payload_length);
+    }
+
+    /* Use cursor to iterate over tmp buffer */
+    struct aws_byte_cursor network_bytes_cursor = aws_byte_cursor_from_buf(&network_bytes_buf);
+
+    /* Advance cursor if some bytes already written (partial writes resume here on the next call) */
+    aws_byte_cursor_advance(&network_bytes_cursor, (size_t)encoder->state_bytes_processed);
+
+    /* Shorten cursor if it won't all fit in out_buf */
+    bool all_data_written = true;
+    size_t space_available = out_buf->capacity - out_buf->len;
+    if (network_bytes_cursor.len > space_available) {
+        network_bytes_cursor.len = space_available;
+        all_data_written = false;
+    }
+
+    aws_byte_buf_write_from_whole_cursor(out_buf, network_bytes_cursor);
+    encoder->state_bytes_processed += network_bytes_cursor.len;
+
+    /* If all bytes written, advance to next state */
+    if (all_data_written) {
+        encoder->state = AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY_CHECK;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* MASKING_KEY_CHECK: Outputs no data. Gets things ready for (or decides to skip) the STATE_MASKING_KEY */
+static int s_state_masking_key_check(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+    (void)out_buf;
+
+    if (!encoder->frame.masked) {
+        /* Unmasked frame: no masking-key to emit. */
+        encoder->state = AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD_CHECK;
+        return AWS_OP_SUCCESS;
+    }
+
+    /* Masked frame: next state writes the 4-byte masking-key. */
+    encoder->state = AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY;
+    encoder->state_bytes_processed = 0;
+    return AWS_OP_SUCCESS;
+}
+
+/* MASKING_KEY: Output masking-key (state skipped if no masking key). */
+static int s_state_masking_key(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+    /* Prepare cursor to iterate over masking-key bytes */
+    struct aws_byte_cursor cursor =
+        aws_byte_cursor_from_array(encoder->frame.masking_key, sizeof(encoder->frame.masking_key));
+
+    /* Advance cursor if some bytes already written (moves ptr forward but shortens len so end stays in place) */
+    aws_byte_cursor_advance(&cursor, (size_t)encoder->state_bytes_processed);
+
+    /* Shorten cursor if it won't all fit in out_buf */
+    bool all_data_written = true;
+    size_t space_available = out_buf->capacity - out_buf->len;
+    if (cursor.len > space_available) {
+        cursor.len = space_available;
+        all_data_written = false;
+    }
+
+    aws_byte_buf_write_from_whole_cursor(out_buf, cursor);
+    encoder->state_bytes_processed += cursor.len;
+
+    /* If all bytes written, advance to next state */
+    if (all_data_written) {
+        encoder->state = AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD_CHECK;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* PAYLOAD_CHECK: Outputs no data. Gets things ready for (or decides to skip) STATE_PAYLOAD */
+static int s_state_payload_check(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+    (void)out_buf;
+
+    /* Only enter PAYLOAD if there are payload bytes to stream; otherwise the frame is finished. */
+    if (encoder->frame.payload_length > 0) {
+        encoder->state_bytes_processed = 0;
+        encoder->state = AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD;
+    } else {
+        encoder->state = AWS_WEBSOCKET_ENCODER_STATE_DONE;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* PAYLOAD: Output payload until we're done (state skipped if no payload). */
+static int s_state_payload(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+
+    /* Bail early if out_buf has no space for writing */
+    if (out_buf->len >= out_buf->capacity) {
+        return AWS_OP_SUCCESS;
+    }
+
+    /* Snapshot progress and buffer so we can tell how much the callback appended. */
+    const uint64_t prev_bytes_processed = encoder->state_bytes_processed;
+    const struct aws_byte_buf prev_buf = *out_buf;
+
+    /* Invoke callback which will write to buffer */
+    int err = encoder->stream_outgoing_payload(out_buf, encoder->user_data);
+    if (err) {
+        return AWS_OP_ERR;
+    }
+
+    /* Ensure that user did not commit forbidden acts upon the out_buf:
+     * the callback may only append; reallocating or truncating the buffer is fatal. */
+    AWS_FATAL_ASSERT(
+        (out_buf->buffer == prev_buf.buffer) && (out_buf->capacity == prev_buf.capacity) &&
+        (out_buf->len >= prev_buf.len));
+
+    size_t bytes_written = out_buf->len - prev_buf.len;
+
+    err = aws_add_u64_checked(encoder->state_bytes_processed, bytes_written, &encoder->state_bytes_processed);
+    if (err) {
+        return aws_raise_error(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT);
+    }
+
+    /* Mask data, if necessary.
+     * RFC-6455 Section 5.3 Client-to-Server Masking
+     * Each byte of payload is XOR against a byte of the masking-key.
+     * mask_index starts from prev_bytes_processed so the 4-byte key stays aligned
+     * across multiple calls to this state. */
+    if (encoder->frame.masked) {
+        uint64_t mask_index = prev_bytes_processed;
+
+        /* Optimization idea: don't do this 1 byte at a time */
+        uint8_t *current_byte = out_buf->buffer + prev_buf.len;
+        uint8_t *end_byte = out_buf->buffer + out_buf->len;
+        while (current_byte != end_byte) {
+            *current_byte++ ^= encoder->frame.masking_key[mask_index++ % 4];
+        }
+    }
+
+    /* If done writing payload, proceed to next state */
+    if (encoder->state_bytes_processed == encoder->frame.payload_length) {
+        encoder->state = AWS_WEBSOCKET_ENCODER_STATE_DONE;
+    } else {
+        /* Some more error-checking... */
+        if (encoder->state_bytes_processed > encoder->frame.payload_length) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_WEBSOCKET,
+                "id=%p: Outgoing stream has exceeded stated payload length of %" PRIu64,
+                (void *)encoder->user_data,
+                encoder->frame.payload_length);
+            return aws_raise_error(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT);
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Jump table indexed by encoder->state; entry order must match enum aws_websocket_encoder_state. */
+static state_fn *s_state_functions[AWS_WEBSOCKET_ENCODER_STATE_DONE] = {
+    s_state_init,
+    s_state_opcode_byte,
+    s_state_length_byte,
+    s_state_extended_length,
+    s_state_masking_key_check,
+    s_state_masking_key,
+    s_state_payload_check,
+    s_state_payload,
+};
+
+/**
+ * Drive the encoder state machine, writing as much of the current frame into out_buf as fits.
+ * Returns AWS_OP_ERR if a state (or the user's payload callback) reports failure.
+ */
+int aws_websocket_encoder_process(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+
+    /* Run state machine until frame is completely encoded, or the state stops changing.
+     * Note that we don't necessarily stop looping when out_buf is full, because not all states need to output data */
+    while (encoder->state != AWS_WEBSOCKET_ENCODER_STATE_DONE) {
+        const enum aws_websocket_encoder_state prev_state = encoder->state;
+
+        int err = s_state_functions[encoder->state](encoder, out_buf);
+        if (err) {
+            return AWS_OP_ERR;
+        }
+
+        if (prev_state == encoder->state) {
+            /* dev-assert: Check that each state is doing as much work as it possibly can.
+             * Except for the PAYLOAD state, where it's up to the user to fill the buffer. */
+            AWS_ASSERT((out_buf->len == out_buf->capacity) || (encoder->state == AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD));
+
+            break;
+        }
+    }
+
+    if (encoder->state == AWS_WEBSOCKET_ENCODER_STATE_DONE) {
+        /* Frame fully emitted: reset so the next start_frame() can be accepted. */
+        encoder->state = AWS_WEBSOCKET_ENCODER_STATE_INIT;
+        encoder->is_frame_in_progress = false;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+/**
+ * Validate and accept `frame` as the next frame to encode.
+ * Fails with AWS_ERROR_INVALID_STATE if a frame is already in progress or fragmentation
+ * rules are violated, and AWS_ERROR_INVALID_ARGUMENT for malformed frame fields.
+ */
+int aws_websocket_encoder_start_frame(struct aws_websocket_encoder *encoder, const struct aws_websocket_frame *frame) {
+    /* Error-check as much as possible before accepting next frame */
+    if (encoder->is_frame_in_progress) {
+        return aws_raise_error(AWS_ERROR_INVALID_STATE);
+    }
+
+    /* RFC-6455 Section 5.2 contains all these rules... */
+
+    /* Opcode must fit in 4bits */
+    if (frame->opcode != (frame->opcode & 0x0F)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET,
+            "id=%p: Outgoing frame has unknown opcode 0x%" PRIx8,
+            (void *)encoder->user_data,
+            frame->opcode);
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    /* High bit of 8byte length must be clear */
+    if (frame->payload_length > AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_WEBSOCKET,
+            "id=%p: Outgoing frame's payload length exceeds the max",
+            (void *)encoder->user_data);
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    /* Data frames with the FIN bit clear are considered fragmented and must be followed by
+     * 1+ CONTINUATION frames, where only the final CONTINUATION frame's FIN bit is set.
+     *
+     * Control frames may be injected in the middle of a fragmented message,
+     * but control frames may not be fragmented themselves. */
+    bool keep_expecting_continuation_data_frame = encoder->expecting_continuation_data_frame;
+    if (aws_websocket_is_data_frame(frame->opcode)) {
+        bool is_continuation_frame = (AWS_WEBSOCKET_OPCODE_CONTINUATION == frame->opcode);
+
+        /* A CONTINUATION is only valid while a fragmented message is open, and vice versa. */
+        if (encoder->expecting_continuation_data_frame != is_continuation_frame) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_WEBSOCKET,
+                "id=%p: Fragmentation error. Outgoing frame starts a new message but previous message has not ended",
+                (void *)encoder->user_data);
+            return aws_raise_error(AWS_ERROR_INVALID_STATE);
+        }
+
+        keep_expecting_continuation_data_frame = !frame->fin;
+    } else {
+        /* Control frames themselves MUST NOT be fragmented. */
+        if (!frame->fin) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_WEBSOCKET,
+                "id=%p: It is illegal to send a fragmented control frame",
+                (void *)encoder->user_data);
+
+            return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        }
+    }
+
+    /* Frame accepted */
+    encoder->frame = *frame;
+    encoder->is_frame_in_progress = true;
+    encoder->expecting_continuation_data_frame = keep_expecting_continuation_data_frame;
+
+    return AWS_OP_SUCCESS;
+}
+
+/* Query whether a frame accepted by start_frame() has not yet been fully encoded. */
+bool aws_websocket_encoder_is_frame_in_progress(const struct aws_websocket_encoder *encoder) {
+    return encoder->is_frame_in_progress;
+}
+
+/* Initialize an encoder: zero all state and record the payload-streaming callback and user_data. */
+void aws_websocket_encoder_init(
+    struct aws_websocket_encoder *encoder,
+    aws_websocket_encoder_payload_fn *stream_outgoing_payload,
+    void *user_data) {
+
+    AWS_ZERO_STRUCT(*encoder);
+    encoder->stream_outgoing_payload = stream_outgoing_payload;
+    encoder->user_data = user_data;
+}
+
+/* Compute the total wire size of `frame`: fixed header, optional extended length,
+ * optional masking-key, plus the payload itself. */
+uint64_t aws_websocket_frame_encoded_size(const struct aws_websocket_frame *frame) {
+    /* This is an internal function, so asserts are sufficient error handling */
+    AWS_ASSERT(frame);
+    AWS_ASSERT(frame->payload_length <= AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE);
+
+    /* Every frame begins with the 2-byte fixed header. */
+    uint64_t encoded_size = 2;
+
+    /* Large payloads need an extended-length field of 8 or 2 bytes. */
+    if (frame->payload_length >= AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MIN_VALUE) {
+        encoded_size += 8;
+    } else if (frame->payload_length >= AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MIN_VALUE) {
+        encoded_size += 2;
+    }
+
+    /* Masked frames carry a 4-byte masking-key. */
+    if (frame->masked) {
+        encoded_size += 4;
+    }
+
+    return encoded_size + frame->payload_length;
+}
diff --git a/contrib/restricted/aws/aws-c-http/ya.make b/contrib/restricted/aws/aws-c-http/ya.make
new file mode 100644
index 00000000000..766d17d996e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/ya.make
@@ -0,0 +1,80 @@
+# Generated by devtools/yamaker from nixpkgs 23.05.
+
+LIBRARY()
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+VERSION(0.7.6)
+
+ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-http/archive/v0.7.6.tar.gz)
+
+PEERDIR(
+ contrib/restricted/aws/aws-c-cal
+ contrib/restricted/aws/aws-c-common
+ contrib/restricted/aws/aws-c-compression
+ contrib/restricted/aws/aws-c-io
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/aws/aws-c-http/include
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_RUNTIME()
+
+CFLAGS(
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+)
+
+SRCS(
+ source/connection.c
+ source/connection_manager.c
+ source/connection_monitor.c
+ source/h1_connection.c
+ source/h1_decoder.c
+ source/h1_encoder.c
+ source/h1_stream.c
+ source/h2_connection.c
+ source/h2_decoder.c
+ source/h2_frames.c
+ source/h2_stream.c
+ source/hpack.c
+ source/hpack_decoder.c
+ source/hpack_encoder.c
+ source/hpack_huffman_static.c
+ source/http.c
+ source/http2_stream_manager.c
+ source/proxy_connection.c
+ source/proxy_strategy.c
+ source/random_access_set.c
+ source/request_response.c
+ source/statistics.c
+ source/strutil.c
+ source/websocket.c
+ source/websocket_bootstrap.c
+ source/websocket_decoder.c
+ source/websocket_encoder.c
+)
+
+END()