path: root/contrib/restricted/aws/aws-c-common/source
author     orivej <orivej@yandex-team.ru>  2022-02-10 16:45:01 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:01 +0300
commit     2d37894b1b037cf24231090eda8589bbb44fb6fc (patch)
tree       be835aa92c6248212e705f25388ebafcf84bc7a1 /contrib/restricted/aws/aws-c-common/source
parent     718c552901d703c502ccbefdfc3c9028d608b947 (diff)
download   ydb-2d37894b1b037cf24231090eda8589bbb44fb6fc.tar.gz
Restoring authorship annotation for <orivej@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/restricted/aws/aws-c-common/source')
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/allocator.c  624
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/allocator_sba.c  840
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/arch/intel/asm/cpuid.c  58
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c  206
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/arch/intel/encoding_avx2.c  768
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/array_list.c  52
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/assert.c  10
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/byte_buf.c  2226
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/cache.c  120
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/codegen.c  28
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/command_line_parser.c  22
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/common.c  266
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/condition_variable.c  6
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/date_time.c  72
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/device_random.c  6
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/encoding.c  54
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/error.c  156
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/fifo_cache.c  118
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/hash_table.c  654
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/lifo_cache.c  124
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/linked_hash_table.c  274
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/log_channel.c  494
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/log_formatter.c  594
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/log_writer.c  234
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/logging.c  1050
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/lru_cache.c  146
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/math.c  48
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/memtrace.c  1054
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/posix/clock.c  46
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/posix/condition_variable.c  48
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/posix/device_random.c  12
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/posix/environment.c  12
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/posix/mutex.c  26
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/posix/process.c  106
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/posix/rw_lock.c  6
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/posix/system_info.c  442
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/posix/thread.c  300
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/posix/time.c  6
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/priority_queue.c  300
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/process_common.c  164
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/ref_count.c  160
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/resource_name.c  222
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/ring_buffer.c  642
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/statistics.c  52
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/string.c  426
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/task_scheduler.c  156
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/uuid.c  10
-rw-r--r--  contrib/restricted/aws/aws-c-common/source/xml_parser.c  910
48 files changed, 7175 insertions, 7175 deletions
diff --git a/contrib/restricted/aws/aws-c-common/source/allocator.c b/contrib/restricted/aws/aws-c-common/source/allocator.c
index 6613d64579..6ffb531509 100644
--- a/contrib/restricted/aws/aws-c-common/source/allocator.c
+++ b/contrib/restricted/aws/aws-c-common/source/allocator.c
@@ -1,312 +1,312 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/assert.h>
-#include <aws/common/common.h>
-#include <aws/common/logging.h>
-#include <aws/common/math.h>
-
-#include <stdarg.h>
-#include <stdlib.h>
-
-#ifdef _WIN32
-# include <Windows.h>
-#endif
-
-#ifdef __MACH__
-# include <CoreFoundation/CoreFoundation.h>
-#endif
-
-/* turn off unused named parameter warning on msvc.*/
-#ifdef _MSC_VER
-# pragma warning(push)
-# pragma warning(disable : 4100)
-#endif
-
-bool aws_allocator_is_valid(const struct aws_allocator *alloc) {
- /* An allocator must define mem_acquire and mem_release. All other fields are optional */
- return alloc && AWS_OBJECT_PTR_IS_READABLE(alloc) && alloc->mem_acquire && alloc->mem_release;
-}
-
-static void *s_default_malloc(struct aws_allocator *allocator, size_t size) {
- (void)allocator;
- return malloc(size);
-}
-
-static void s_default_free(struct aws_allocator *allocator, void *ptr) {
- (void)allocator;
- free(ptr);
-}
-
-static void *s_default_realloc(struct aws_allocator *allocator, void *ptr, size_t oldsize, size_t newsize) {
- (void)allocator;
- (void)oldsize;
- return realloc(ptr, newsize);
-}
-
-static void *s_default_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
- (void)allocator;
- return calloc(num, size);
-}
-
-static struct aws_allocator default_allocator = {
- .mem_acquire = s_default_malloc,
- .mem_release = s_default_free,
- .mem_realloc = s_default_realloc,
- .mem_calloc = s_default_calloc,
-};
-
-struct aws_allocator *aws_default_allocator(void) {
- return &default_allocator;
-}
-
-void *aws_mem_acquire(struct aws_allocator *allocator, size_t size) {
- AWS_FATAL_PRECONDITION(allocator != NULL);
- AWS_FATAL_PRECONDITION(allocator->mem_acquire != NULL);
- /* Protect against https://wiki.sei.cmu.edu/confluence/display/c/MEM04-C.+Beware+of+zero-length+allocations */
- AWS_FATAL_PRECONDITION(size != 0);
-
- void *mem = allocator->mem_acquire(allocator, size);
- if (!mem) {
- aws_raise_error(AWS_ERROR_OOM);
- }
- return mem;
-}
-
-void *aws_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
- AWS_FATAL_PRECONDITION(allocator != NULL);
- AWS_FATAL_PRECONDITION(allocator->mem_calloc || allocator->mem_acquire);
- /* Protect against https://wiki.sei.cmu.edu/confluence/display/c/MEM04-C.+Beware+of+zero-length+allocations */
- AWS_FATAL_PRECONDITION(num != 0 && size != 0);
-
- /* Defensive check: never use calloc with size * num that would overflow
- * https://wiki.sei.cmu.edu/confluence/display/c/MEM07-C.+Ensure+that+the+arguments+to+calloc%28%29%2C+when+multiplied%2C+do+not+wrap
- */
- size_t required_bytes;
- if (aws_mul_size_checked(num, size, &required_bytes)) {
- return NULL;
- }
-
- /* If there is a defined calloc, use it */
- if (allocator->mem_calloc) {
- void *mem = allocator->mem_calloc(allocator, num, size);
- if (!mem) {
- aws_raise_error(AWS_ERROR_OOM);
- }
- return mem;
- }
-
- /* Otherwise, emulate calloc */
- void *mem = allocator->mem_acquire(allocator, required_bytes);
- if (!mem) {
- aws_raise_error(AWS_ERROR_OOM);
- return NULL;
- }
- memset(mem, 0, required_bytes);
- AWS_POSTCONDITION(mem != NULL);
- return mem;
-}
-
-#define AWS_ALIGN_ROUND_UP(value, alignment) (((value) + ((alignment)-1)) & ~((alignment)-1))
-
-void *aws_mem_acquire_many(struct aws_allocator *allocator, size_t count, ...) {
-
- enum { S_ALIGNMENT = sizeof(intmax_t) };
-
- va_list args_size;
- va_start(args_size, count);
- va_list args_allocs;
- va_copy(args_allocs, args_size);
-
- size_t total_size = 0;
- for (size_t i = 0; i < count; ++i) {
-
- /* Ignore the pointer argument for now */
- va_arg(args_size, void **);
-
- size_t alloc_size = va_arg(args_size, size_t);
- total_size += AWS_ALIGN_ROUND_UP(alloc_size, S_ALIGNMENT);
- }
- va_end(args_size);
-
- void *allocation = NULL;
-
- if (total_size > 0) {
-
- allocation = aws_mem_acquire(allocator, total_size);
- if (!allocation) {
- aws_raise_error(AWS_ERROR_OOM);
- goto cleanup;
- }
-
- uint8_t *current_ptr = allocation;
-
- for (size_t i = 0; i < count; ++i) {
-
- void **out_ptr = va_arg(args_allocs, void **);
-
- size_t alloc_size = va_arg(args_allocs, size_t);
- alloc_size = AWS_ALIGN_ROUND_UP(alloc_size, S_ALIGNMENT);
-
- *out_ptr = current_ptr;
- current_ptr += alloc_size;
- }
- }
-
-cleanup:
- va_end(args_allocs);
- return allocation;
-}
-
-#undef AWS_ALIGN_ROUND_UP
-
-void aws_mem_release(struct aws_allocator *allocator, void *ptr) {
- AWS_FATAL_PRECONDITION(allocator != NULL);
- AWS_FATAL_PRECONDITION(allocator->mem_release != NULL);
-
- if (ptr != NULL) {
- allocator->mem_release(allocator, ptr);
- }
-}
-
-int aws_mem_realloc(struct aws_allocator *allocator, void **ptr, size_t oldsize, size_t newsize) {
- AWS_FATAL_PRECONDITION(allocator != NULL);
- AWS_FATAL_PRECONDITION(allocator->mem_realloc || allocator->mem_acquire);
- AWS_FATAL_PRECONDITION(allocator->mem_release);
-
- /* Protect against https://wiki.sei.cmu.edu/confluence/display/c/MEM04-C.+Beware+of+zero-length+allocations */
- if (newsize == 0) {
- aws_mem_release(allocator, *ptr);
- *ptr = NULL;
- return AWS_OP_SUCCESS;
- }
-
- if (allocator->mem_realloc) {
- void *newptr = allocator->mem_realloc(allocator, *ptr, oldsize, newsize);
- if (!newptr) {
- return aws_raise_error(AWS_ERROR_OOM);
- }
- *ptr = newptr;
- return AWS_OP_SUCCESS;
- }
-
- /* Since the allocator doesn't support realloc, we'll need to emulate it (inefficiently). */
- if (oldsize >= newsize) {
- return AWS_OP_SUCCESS;
- }
-
- void *newptr = allocator->mem_acquire(allocator, newsize);
- if (!newptr) {
- return aws_raise_error(AWS_ERROR_OOM);
- }
-
- memcpy(newptr, *ptr, oldsize);
- memset((uint8_t *)newptr + oldsize, 0, newsize - oldsize);
-
- aws_mem_release(allocator, *ptr);
-
- *ptr = newptr;
-
- return AWS_OP_SUCCESS;
-}
-
-/* Wraps a CFAllocator around aws_allocator. For Mac only. */
-#ifdef __MACH__
-
-static CFStringRef s_cf_allocator_description = CFSTR("CFAllocator wrapping aws_allocator.");
-
-/* note we don't have a standard specification stating sizeof(size_t) == sizeof(void *) so we have some extra casts */
-static void *s_cf_allocator_allocate(CFIndex alloc_size, CFOptionFlags hint, void *info) {
- (void)hint;
-
- struct aws_allocator *allocator = info;
-
- void *mem = aws_mem_acquire(allocator, (size_t)alloc_size + sizeof(size_t));
-
- if (!mem) {
- return NULL;
- }
-
- size_t allocation_size = (size_t)alloc_size + sizeof(size_t);
- memcpy(mem, &allocation_size, sizeof(size_t));
- return (void *)((uint8_t *)mem + sizeof(size_t));
-}
-
-static void s_cf_allocator_deallocate(void *ptr, void *info) {
- struct aws_allocator *allocator = info;
-
- void *original_allocation = (uint8_t *)ptr - sizeof(size_t);
-
- aws_mem_release(allocator, original_allocation);
-}
-
-static void *s_cf_allocator_reallocate(void *ptr, CFIndex new_size, CFOptionFlags hint, void *info) {
- (void)hint;
-
- struct aws_allocator *allocator = info;
- AWS_ASSERT(allocator->mem_realloc);
-
- void *original_allocation = (uint8_t *)ptr - sizeof(size_t);
- size_t original_size = 0;
- memcpy(&original_size, original_allocation, sizeof(size_t));
-
- if (aws_mem_realloc(allocator, &original_allocation, original_size, (size_t)new_size)) {
- return NULL;
- }
-
- size_t new_allocation_size = (size_t)new_size;
- memcpy(original_allocation, &new_allocation_size, sizeof(size_t));
-
- return (void *)((uint8_t *)original_allocation + sizeof(size_t));
-}
-
-static CFStringRef s_cf_allocator_copy_description(const void *info) {
- (void)info;
-
- return s_cf_allocator_description;
-}
-
-static CFIndex s_cf_allocator_preferred_size(CFIndex size, CFOptionFlags hint, void *info) {
- (void)hint;
- (void)info;
-
- return size + sizeof(size_t);
-}
-
-CFAllocatorRef aws_wrapped_cf_allocator_new(struct aws_allocator *allocator) {
- CFAllocatorRef cf_allocator = NULL;
-
- CFAllocatorReallocateCallBack reallocate_callback = NULL;
-
- if (allocator->mem_realloc) {
- reallocate_callback = s_cf_allocator_reallocate;
- }
-
- CFAllocatorContext context = {
- .allocate = s_cf_allocator_allocate,
- .copyDescription = s_cf_allocator_copy_description,
- .deallocate = s_cf_allocator_deallocate,
- .reallocate = reallocate_callback,
- .info = allocator,
- .preferredSize = s_cf_allocator_preferred_size,
- .release = NULL,
- .retain = NULL,
- .version = 0,
- };
-
- cf_allocator = CFAllocatorCreate(NULL, &context);
-
- if (!cf_allocator) {
- aws_raise_error(AWS_ERROR_OOM);
- }
-
- return cf_allocator;
-}
-
-void aws_wrapped_cf_allocator_destroy(CFAllocatorRef allocator) {
- CFRelease(allocator);
-}
-
-#endif /*__MACH__ */
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/assert.h>
+#include <aws/common/common.h>
+#include <aws/common/logging.h>
+#include <aws/common/math.h>
+
+#include <stdarg.h>
+#include <stdlib.h>
+
+#ifdef _WIN32
+# include <Windows.h>
+#endif
+
+#ifdef __MACH__
+# include <CoreFoundation/CoreFoundation.h>
+#endif
+
+/* turn off unused named parameter warning on msvc.*/
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable : 4100)
+#endif
+
+bool aws_allocator_is_valid(const struct aws_allocator *alloc) {
+ /* An allocator must define mem_acquire and mem_release. All other fields are optional */
+ return alloc && AWS_OBJECT_PTR_IS_READABLE(alloc) && alloc->mem_acquire && alloc->mem_release;
+}
+
+static void *s_default_malloc(struct aws_allocator *allocator, size_t size) {
+ (void)allocator;
+ return malloc(size);
+}
+
+static void s_default_free(struct aws_allocator *allocator, void *ptr) {
+ (void)allocator;
+ free(ptr);
+}
+
+static void *s_default_realloc(struct aws_allocator *allocator, void *ptr, size_t oldsize, size_t newsize) {
+ (void)allocator;
+ (void)oldsize;
+ return realloc(ptr, newsize);
+}
+
+static void *s_default_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
+ (void)allocator;
+ return calloc(num, size);
+}
+
+static struct aws_allocator default_allocator = {
+ .mem_acquire = s_default_malloc,
+ .mem_release = s_default_free,
+ .mem_realloc = s_default_realloc,
+ .mem_calloc = s_default_calloc,
+};
+
+struct aws_allocator *aws_default_allocator(void) {
+ return &default_allocator;
+}
+
+void *aws_mem_acquire(struct aws_allocator *allocator, size_t size) {
+ AWS_FATAL_PRECONDITION(allocator != NULL);
+ AWS_FATAL_PRECONDITION(allocator->mem_acquire != NULL);
+ /* Protect against https://wiki.sei.cmu.edu/confluence/display/c/MEM04-C.+Beware+of+zero-length+allocations */
+ AWS_FATAL_PRECONDITION(size != 0);
+
+ void *mem = allocator->mem_acquire(allocator, size);
+ if (!mem) {
+ aws_raise_error(AWS_ERROR_OOM);
+ }
+ return mem;
+}
+
+void *aws_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
+ AWS_FATAL_PRECONDITION(allocator != NULL);
+ AWS_FATAL_PRECONDITION(allocator->mem_calloc || allocator->mem_acquire);
+ /* Protect against https://wiki.sei.cmu.edu/confluence/display/c/MEM04-C.+Beware+of+zero-length+allocations */
+ AWS_FATAL_PRECONDITION(num != 0 && size != 0);
+
+ /* Defensive check: never use calloc with size * num that would overflow
+ * https://wiki.sei.cmu.edu/confluence/display/c/MEM07-C.+Ensure+that+the+arguments+to+calloc%28%29%2C+when+multiplied%2C+do+not+wrap
+ */
+ size_t required_bytes;
+ if (aws_mul_size_checked(num, size, &required_bytes)) {
+ return NULL;
+ }
+
+ /* If there is a defined calloc, use it */
+ if (allocator->mem_calloc) {
+ void *mem = allocator->mem_calloc(allocator, num, size);
+ if (!mem) {
+ aws_raise_error(AWS_ERROR_OOM);
+ }
+ return mem;
+ }
+
+ /* Otherwise, emulate calloc */
+ void *mem = allocator->mem_acquire(allocator, required_bytes);
+ if (!mem) {
+ aws_raise_error(AWS_ERROR_OOM);
+ return NULL;
+ }
+ memset(mem, 0, required_bytes);
+ AWS_POSTCONDITION(mem != NULL);
+ return mem;
+}
+
+#define AWS_ALIGN_ROUND_UP(value, alignment) (((value) + ((alignment)-1)) & ~((alignment)-1))
+
+void *aws_mem_acquire_many(struct aws_allocator *allocator, size_t count, ...) {
+
+ enum { S_ALIGNMENT = sizeof(intmax_t) };
+
+ va_list args_size;
+ va_start(args_size, count);
+ va_list args_allocs;
+ va_copy(args_allocs, args_size);
+
+ size_t total_size = 0;
+ for (size_t i = 0; i < count; ++i) {
+
+ /* Ignore the pointer argument for now */
+ va_arg(args_size, void **);
+
+ size_t alloc_size = va_arg(args_size, size_t);
+ total_size += AWS_ALIGN_ROUND_UP(alloc_size, S_ALIGNMENT);
+ }
+ va_end(args_size);
+
+ void *allocation = NULL;
+
+ if (total_size > 0) {
+
+ allocation = aws_mem_acquire(allocator, total_size);
+ if (!allocation) {
+ aws_raise_error(AWS_ERROR_OOM);
+ goto cleanup;
+ }
+
+ uint8_t *current_ptr = allocation;
+
+ for (size_t i = 0; i < count; ++i) {
+
+ void **out_ptr = va_arg(args_allocs, void **);
+
+ size_t alloc_size = va_arg(args_allocs, size_t);
+ alloc_size = AWS_ALIGN_ROUND_UP(alloc_size, S_ALIGNMENT);
+
+ *out_ptr = current_ptr;
+ current_ptr += alloc_size;
+ }
+ }
+
+cleanup:
+ va_end(args_allocs);
+ return allocation;
+}
+
+#undef AWS_ALIGN_ROUND_UP
+
+void aws_mem_release(struct aws_allocator *allocator, void *ptr) {
+ AWS_FATAL_PRECONDITION(allocator != NULL);
+ AWS_FATAL_PRECONDITION(allocator->mem_release != NULL);
+
+ if (ptr != NULL) {
+ allocator->mem_release(allocator, ptr);
+ }
+}
+
+int aws_mem_realloc(struct aws_allocator *allocator, void **ptr, size_t oldsize, size_t newsize) {
+ AWS_FATAL_PRECONDITION(allocator != NULL);
+ AWS_FATAL_PRECONDITION(allocator->mem_realloc || allocator->mem_acquire);
+ AWS_FATAL_PRECONDITION(allocator->mem_release);
+
+ /* Protect against https://wiki.sei.cmu.edu/confluence/display/c/MEM04-C.+Beware+of+zero-length+allocations */
+ if (newsize == 0) {
+ aws_mem_release(allocator, *ptr);
+ *ptr = NULL;
+ return AWS_OP_SUCCESS;
+ }
+
+ if (allocator->mem_realloc) {
+ void *newptr = allocator->mem_realloc(allocator, *ptr, oldsize, newsize);
+ if (!newptr) {
+ return aws_raise_error(AWS_ERROR_OOM);
+ }
+ *ptr = newptr;
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Since the allocator doesn't support realloc, we'll need to emulate it (inefficiently). */
+ if (oldsize >= newsize) {
+ return AWS_OP_SUCCESS;
+ }
+
+ void *newptr = allocator->mem_acquire(allocator, newsize);
+ if (!newptr) {
+ return aws_raise_error(AWS_ERROR_OOM);
+ }
+
+ memcpy(newptr, *ptr, oldsize);
+ memset((uint8_t *)newptr + oldsize, 0, newsize - oldsize);
+
+ aws_mem_release(allocator, *ptr);
+
+ *ptr = newptr;
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Wraps a CFAllocator around aws_allocator. For Mac only. */
+#ifdef __MACH__
+
+static CFStringRef s_cf_allocator_description = CFSTR("CFAllocator wrapping aws_allocator.");
+
+/* note we don't have a standard specification stating sizeof(size_t) == sizeof(void *) so we have some extra casts */
+static void *s_cf_allocator_allocate(CFIndex alloc_size, CFOptionFlags hint, void *info) {
+ (void)hint;
+
+ struct aws_allocator *allocator = info;
+
+ void *mem = aws_mem_acquire(allocator, (size_t)alloc_size + sizeof(size_t));
+
+ if (!mem) {
+ return NULL;
+ }
+
+ size_t allocation_size = (size_t)alloc_size + sizeof(size_t);
+ memcpy(mem, &allocation_size, sizeof(size_t));
+ return (void *)((uint8_t *)mem + sizeof(size_t));
+}
+
+static void s_cf_allocator_deallocate(void *ptr, void *info) {
+ struct aws_allocator *allocator = info;
+
+ void *original_allocation = (uint8_t *)ptr - sizeof(size_t);
+
+ aws_mem_release(allocator, original_allocation);
+}
+
+static void *s_cf_allocator_reallocate(void *ptr, CFIndex new_size, CFOptionFlags hint, void *info) {
+ (void)hint;
+
+ struct aws_allocator *allocator = info;
+ AWS_ASSERT(allocator->mem_realloc);
+
+ void *original_allocation = (uint8_t *)ptr - sizeof(size_t);
+ size_t original_size = 0;
+ memcpy(&original_size, original_allocation, sizeof(size_t));
+
+ if (aws_mem_realloc(allocator, &original_allocation, original_size, (size_t)new_size)) {
+ return NULL;
+ }
+
+ size_t new_allocation_size = (size_t)new_size;
+ memcpy(original_allocation, &new_allocation_size, sizeof(size_t));
+
+ return (void *)((uint8_t *)original_allocation + sizeof(size_t));
+}
+
+static CFStringRef s_cf_allocator_copy_description(const void *info) {
+ (void)info;
+
+ return s_cf_allocator_description;
+}
+
+static CFIndex s_cf_allocator_preferred_size(CFIndex size, CFOptionFlags hint, void *info) {
+ (void)hint;
+ (void)info;
+
+ return size + sizeof(size_t);
+}
+
+CFAllocatorRef aws_wrapped_cf_allocator_new(struct aws_allocator *allocator) {
+ CFAllocatorRef cf_allocator = NULL;
+
+ CFAllocatorReallocateCallBack reallocate_callback = NULL;
+
+ if (allocator->mem_realloc) {
+ reallocate_callback = s_cf_allocator_reallocate;
+ }
+
+ CFAllocatorContext context = {
+ .allocate = s_cf_allocator_allocate,
+ .copyDescription = s_cf_allocator_copy_description,
+ .deallocate = s_cf_allocator_deallocate,
+ .reallocate = reallocate_callback,
+ .info = allocator,
+ .preferredSize = s_cf_allocator_preferred_size,
+ .release = NULL,
+ .retain = NULL,
+ .version = 0,
+ };
+
+ cf_allocator = CFAllocatorCreate(NULL, &context);
+
+ if (!cf_allocator) {
+ aws_raise_error(AWS_ERROR_OOM);
+ }
+
+ return cf_allocator;
+}
+
+void aws_wrapped_cf_allocator_destroy(CFAllocatorRef allocator) {
+ CFRelease(allocator);
+}
+
+#endif /*__MACH__ */
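
The allocator.c hunk above defines the default allocator and the aws_mem_* entry points. A minimal usage sketch of those entry points follows; it assumes the declarations are reachable through <aws/common/common.h> as included in the file, and the helper name is illustrative only:

#include <aws/common/common.h>
#include <string.h>

/* Hedged sketch: allocate a buffer from the default allocator, grow it, then release it. */
static int example_default_allocator_use(void) {
    struct aws_allocator *alloc = aws_default_allocator();

    /* aws_mem_acquire raises AWS_ERROR_OOM and returns NULL on failure */
    void *buf = aws_mem_acquire(alloc, 64);
    if (!buf) {
        return AWS_OP_ERR;
    }
    memset(buf, 0, 64);

    /* grows in place via mem_realloc, or is emulated with acquire+copy if the
     * allocator does not define mem_realloc; on failure *buf is left untouched */
    if (aws_mem_realloc(alloc, &buf, 64, 256)) {
        aws_mem_release(alloc, buf);
        return AWS_OP_ERR;
    }

    aws_mem_release(alloc, buf);
    return AWS_OP_SUCCESS;
}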
diff --git a/contrib/restricted/aws/aws-c-common/source/allocator_sba.c b/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
index 127f26da10..d30c67c37e 100644
--- a/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
+++ b/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
@@ -1,420 +1,420 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/allocator.h>
-#include <aws/common/array_list.h>
-#include <aws/common/assert.h>
-#include <aws/common/mutex.h>
-
-/*
- * Small Block Allocator
- * This is a fairly standard approach, the idea is to always allocate aligned pages of memory so that for
- * any address you can round to the nearest page boundary to find the bookkeeping data. The idea is to reduce
- * overhead per alloc and greatly improve runtime speed by doing as little actual allocation work as possible,
- * preferring instead to re-use (hopefully still cached) chunks in FIFO order, or chunking up a page if there's
- * no free chunks. When all chunks in a page are freed, the page is returned to the OS.
- *
- * The allocator itself is simply an array of bins, each representing a power of 2 size from 32 - N (512 tends to be
- * a good upper bound). Thread safety is guaranteed by a mutex per bin, and locks are only necessary around the
- * lowest level alloc and free operations.
- *
- * Note: this allocator gets its internal memory for data structures from the parent allocator, but does not
- * use the parent to allocate pages. Pages are allocated directly from the OS-specific aligned malloc implementation,
- * which allows the OS to do address re-mapping for us instead of over-allocating to fulfill alignment.
- */
-
-#ifdef _WIN32
-# include <malloc.h>
-#elif __linux__ || __APPLE__
-# include <stdlib.h>
-#endif
-
-#if !defined(AWS_SBA_PAGE_SIZE)
-# if defined(PAGE_SIZE)
-# define AWS_SBA_PAGE_SIZE ((uintptr_t)(PAGE_SIZE))
-# else
-# define AWS_SBA_PAGE_SIZE ((uintptr_t)(4096))
-# endif
-#endif
-
-#define AWS_SBA_PAGE_MASK ((uintptr_t) ~(AWS_SBA_PAGE_SIZE - 1))
-#define AWS_SBA_TAG_VALUE 0x736f6d6570736575ULL
-
-/* list of sizes of bins, must be powers of 2, and less than AWS_SBA_PAGE_SIZE * 0.5 */
-#define AWS_SBA_BIN_COUNT 5
-static const size_t s_bin_sizes[AWS_SBA_BIN_COUNT] = {32, 64, 128, 256, 512};
-static const size_t s_max_bin_size = 512;
-
-struct sba_bin {
- size_t size; /* size of allocs in this bin */
- struct aws_mutex mutex; /* lock protecting this bin */
- uint8_t *page_cursor; /* pointer to working page, currently being chunked from */
- struct aws_array_list active_pages; /* all pages in use by this bin, could be optimized at scale by being a set */
- struct aws_array_list free_chunks; /* free chunks available in this bin */
-};
-
-/* Header stored at the base of each page.
- * As long as this is under 32 bytes, all is well.
- * Above that, there's potentially more waste per page */
-struct page_header {
- uint64_t tag; /* marker to identify/validate pages */
- struct sba_bin *bin; /* bin this page belongs to */
- uint32_t alloc_count; /* number of outstanding allocs from this page */
- uint64_t tag2;
-};
-
-/* This is the impl for the aws_allocator */
-struct small_block_allocator {
- struct aws_allocator *allocator; /* parent allocator, for large allocs */
- struct sba_bin bins[AWS_SBA_BIN_COUNT];
- int (*lock)(struct aws_mutex *);
- int (*unlock)(struct aws_mutex *);
-};
-
-static int s_null_lock(struct aws_mutex *mutex) {
- (void)mutex;
- /* NO OP */
- return 0;
-}
-
-static int s_null_unlock(struct aws_mutex *mutex) {
- (void)mutex;
- /* NO OP */
- return 0;
-}
-
-static int s_mutex_lock(struct aws_mutex *mutex) {
- return aws_mutex_lock(mutex);
-}
-
-static int s_mutex_unlock(struct aws_mutex *mutex) {
- return aws_mutex_unlock(mutex);
-}
-
-static void *s_page_base(const void *addr) {
- /* mask off the address to round it to page alignment */
- uint8_t *page_base = (uint8_t *)(((uintptr_t)addr) & AWS_SBA_PAGE_MASK);
- return page_base;
-}
-
-static void *s_page_bind(void *addr, struct sba_bin *bin) {
- /* insert the header at the base of the page and advance past it */
- struct page_header *page = (struct page_header *)addr;
- page->tag = page->tag2 = AWS_SBA_TAG_VALUE;
- page->bin = bin;
- page->alloc_count = 0;
- return (uint8_t *)addr + sizeof(struct page_header);
-}
-
-/* Wraps OS-specific aligned malloc implementation */
-static void *s_aligned_alloc(size_t size, size_t align) {
-#ifdef _WIN32
- return _aligned_malloc(size, align);
-#else
- void *mem = NULL;
- int return_code = posix_memalign(&mem, align, size);
- if (return_code) {
- aws_raise_error(AWS_ERROR_OOM);
- return NULL;
- }
- return mem;
-#endif
-}
-
-/* wraps OS-specific aligned free implementation */
-static void s_aligned_free(void *addr) {
-#ifdef _WIN32
- _aligned_free(addr);
-#else
- free(addr);
-#endif
-}
-
-/* aws_allocator vtable template */
-static void *s_sba_mem_acquire(struct aws_allocator *allocator, size_t size);
-static void s_sba_mem_release(struct aws_allocator *allocator, void *ptr);
-static void *s_sba_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size);
-static void *s_sba_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size);
-
-static struct aws_allocator s_sba_allocator = {
- .mem_acquire = s_sba_mem_acquire,
- .mem_release = s_sba_mem_release,
- .mem_realloc = s_sba_mem_realloc,
- .mem_calloc = s_sba_mem_calloc,
-};
-
-static int s_sba_init(struct small_block_allocator *sba, struct aws_allocator *allocator, bool multi_threaded) {
- sba->allocator = allocator;
- AWS_ZERO_ARRAY(sba->bins);
- sba->lock = multi_threaded ? s_mutex_lock : s_null_lock;
- sba->unlock = multi_threaded ? s_mutex_unlock : s_null_unlock;
-
- for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
- struct sba_bin *bin = &sba->bins[idx];
- bin->size = s_bin_sizes[idx];
- if (multi_threaded && aws_mutex_init(&bin->mutex)) {
- goto cleanup;
- }
- if (aws_array_list_init_dynamic(&bin->active_pages, sba->allocator, 16, sizeof(void *))) {
- goto cleanup;
- }
- /* start with enough chunks for 1 page */
- if (aws_array_list_init_dynamic(
- &bin->free_chunks, sba->allocator, aws_max_size(AWS_SBA_PAGE_SIZE / bin->size, 16), sizeof(void *))) {
- goto cleanup;
- }
- }
-
- return AWS_OP_SUCCESS;
-
-cleanup:
- for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
- struct sba_bin *bin = &sba->bins[idx];
- aws_mutex_clean_up(&bin->mutex);
- aws_array_list_clean_up(&bin->active_pages);
- aws_array_list_clean_up(&bin->free_chunks);
- }
- return AWS_OP_ERR;
-}
-
-static void s_sba_clean_up(struct small_block_allocator *sba) {
- /* free all known pages, then free the working page */
- for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
- struct sba_bin *bin = &sba->bins[idx];
- for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) {
- void *page_addr = NULL;
- aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx);
- s_aligned_free(page_addr);
- }
- if (bin->page_cursor) {
- void *page_addr = s_page_base(bin->page_cursor);
- s_aligned_free(page_addr);
- }
-
- aws_array_list_clean_up(&bin->active_pages);
- aws_array_list_clean_up(&bin->free_chunks);
- aws_mutex_clean_up(&bin->mutex);
- }
-}
-
-struct aws_allocator *aws_small_block_allocator_new(struct aws_allocator *allocator, bool multi_threaded) {
- struct small_block_allocator *sba = NULL;
- struct aws_allocator *sba_allocator = NULL;
- aws_mem_acquire_many(
- allocator, 2, &sba, sizeof(struct small_block_allocator), &sba_allocator, sizeof(struct aws_allocator));
-
- if (!sba || !sba_allocator) {
- return NULL;
- }
-
- AWS_ZERO_STRUCT(*sba);
- AWS_ZERO_STRUCT(*sba_allocator);
-
- /* copy the template vtable */
- *sba_allocator = s_sba_allocator;
- sba_allocator->impl = sba;
-
- if (s_sba_init(sba, allocator, multi_threaded)) {
- s_sba_clean_up(sba);
- aws_mem_release(allocator, sba);
- return NULL;
- }
- return sba_allocator;
-}
-
-void aws_small_block_allocator_destroy(struct aws_allocator *sba_allocator) {
- if (!sba_allocator) {
- return;
- }
- struct small_block_allocator *sba = sba_allocator->impl;
- if (!sba) {
- return;
- }
-
- struct aws_allocator *allocator = sba->allocator;
- s_sba_clean_up(sba);
- aws_mem_release(allocator, sba);
-}
-
-/* NOTE: Expects the mutex to be held by the caller */
-static void *s_sba_alloc_from_bin(struct sba_bin *bin) {
- /* check the free list, hand chunks out in FIFO order */
- if (bin->free_chunks.length > 0) {
- void *chunk = NULL;
- if (aws_array_list_back(&bin->free_chunks, &chunk)) {
- return NULL;
- }
- if (aws_array_list_pop_back(&bin->free_chunks)) {
- return NULL;
- }
-
- AWS_ASSERT(chunk);
- struct page_header *page = s_page_base(chunk);
- page->alloc_count++;
- return chunk;
- }
-
- /* If there is a working page to chunk from, use it */
- if (bin->page_cursor) {
- struct page_header *page = s_page_base(bin->page_cursor);
- AWS_ASSERT(page);
- size_t space_left = AWS_SBA_PAGE_SIZE - (bin->page_cursor - (uint8_t *)page);
- if (space_left >= bin->size) {
- void *chunk = bin->page_cursor;
- page->alloc_count++;
- bin->page_cursor += bin->size;
- space_left -= bin->size;
- if (space_left < bin->size) {
- aws_array_list_push_back(&bin->active_pages, &page);
- bin->page_cursor = NULL;
- }
- return chunk;
- }
- }
-
- /* Nothing free to use, allocate a page and restart */
- uint8_t *new_page = s_aligned_alloc(AWS_SBA_PAGE_SIZE, AWS_SBA_PAGE_SIZE);
- new_page = s_page_bind(new_page, bin);
- bin->page_cursor = new_page;
- return s_sba_alloc_from_bin(bin);
-}
-
-/* NOTE: Expects the mutex to be held by the caller */
-static void s_sba_free_to_bin(struct sba_bin *bin, void *addr) {
- AWS_PRECONDITION(addr);
- struct page_header *page = s_page_base(addr);
- AWS_ASSERT(page->bin == bin);
- page->alloc_count--;
- if (page->alloc_count == 0 && page != s_page_base(bin->page_cursor)) { /* empty page, free it */
- uint8_t *page_start = (uint8_t *)page + sizeof(struct page_header);
- uint8_t *page_end = page_start + AWS_SBA_PAGE_SIZE;
- /* Remove all chunks in the page from the free list */
- intptr_t chunk_idx = bin->free_chunks.length;
- for (; chunk_idx >= 0; --chunk_idx) {
- uint8_t *chunk = NULL;
- aws_array_list_get_at(&bin->free_chunks, &chunk, chunk_idx);
- if (chunk >= page_start && chunk < page_end) {
- aws_array_list_swap(&bin->free_chunks, chunk_idx, bin->free_chunks.length - 1);
- aws_array_list_pop_back(&bin->free_chunks);
- }
- }
-
- /* Find page in pages list and remove it */
- for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) {
- void *page_addr = NULL;
- aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx);
- if (page_addr == page) {
- aws_array_list_swap(&bin->active_pages, page_idx, bin->active_pages.length - 1);
- aws_array_list_pop_back(&bin->active_pages);
- break;
- }
- }
- /* ensure that the page tag is erased, in case nearby memory is re-used */
- page->tag = page->tag2 = 0;
- s_aligned_free(page);
- return;
- }
-
- aws_array_list_push_back(&bin->free_chunks, &addr);
-}
-
-/* No lock required for this function, it's all read-only access to constant data */
-static struct sba_bin *s_sba_find_bin(struct small_block_allocator *sba, size_t size) {
- AWS_PRECONDITION(size <= s_max_bin_size);
-
- /* map bits 5(32) to 9(512) to indices 0-4 */
- size_t next_pow2 = 0;
- aws_round_up_to_power_of_two(size, &next_pow2);
- size_t lz = aws_clz_i32((int32_t)next_pow2);
- size_t idx = aws_sub_size_saturating(31 - lz, 5);
- AWS_ASSERT(idx <= 4);
- struct sba_bin *bin = &sba->bins[idx];
- AWS_ASSERT(bin->size >= size);
- return bin;
-}
-
-static void *s_sba_alloc(struct small_block_allocator *sba, size_t size) {
- if (size <= s_max_bin_size) {
- struct sba_bin *bin = s_sba_find_bin(sba, size);
- AWS_FATAL_ASSERT(bin);
- /* BEGIN CRITICAL SECTION */
- sba->lock(&bin->mutex);
- void *mem = s_sba_alloc_from_bin(bin);
- sba->unlock(&bin->mutex);
- /* END CRITICAL SECTION */
- return mem;
- }
- return aws_mem_acquire(sba->allocator, size);
-}
-
-static void s_sba_free(struct small_block_allocator *sba, void *addr) {
- if (!addr) {
- return;
- }
-
- struct page_header *page = (struct page_header *)s_page_base(addr);
- /* Check to see if this page is tagged by the sba */
- /* this check causes a read of (possibly) memory we didn't allocate, but it will always be
- * heap memory, so should not cause any issues. TSan will see this as a data race, but it
- * is not, that's a false positive
- */
- if (page->tag == AWS_SBA_TAG_VALUE && page->tag2 == AWS_SBA_TAG_VALUE) {
- struct sba_bin *bin = page->bin;
- /* BEGIN CRITICAL SECTION */
- sba->lock(&bin->mutex);
- s_sba_free_to_bin(bin, addr);
- sba->unlock(&bin->mutex);
- /* END CRITICAL SECTION */
- return;
- }
- /* large alloc, give back to underlying allocator */
- aws_mem_release(sba->allocator, addr);
-}
-
-static void *s_sba_mem_acquire(struct aws_allocator *allocator, size_t size) {
- struct small_block_allocator *sba = allocator->impl;
- return s_sba_alloc(sba, size);
-}
-
-static void s_sba_mem_release(struct aws_allocator *allocator, void *ptr) {
- struct small_block_allocator *sba = allocator->impl;
- s_sba_free(sba, ptr);
-}
-
-static void *s_sba_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size) {
- struct small_block_allocator *sba = allocator->impl;
- /* If both allocations come from the parent, let the parent do it */
- if (old_size > s_max_bin_size && new_size > s_max_bin_size) {
- void *ptr = old_ptr;
- if (aws_mem_realloc(sba->allocator, &ptr, old_size, new_size)) {
- return NULL;
- }
- return ptr;
- }
-
- if (new_size == 0) {
- s_sba_free(sba, old_ptr);
- return NULL;
- }
-
- if (old_size > new_size) {
- return old_ptr;
- }
-
- void *new_mem = s_sba_alloc(sba, new_size);
- if (old_ptr && old_size) {
- memcpy(new_mem, old_ptr, old_size);
- s_sba_free(sba, old_ptr);
- }
-
- return new_mem;
-}
-
-static void *s_sba_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
- struct small_block_allocator *sba = allocator->impl;
- void *mem = s_sba_alloc(sba, size * num);
- memset(mem, 0, size * num);
- return mem;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/allocator.h>
+#include <aws/common/array_list.h>
+#include <aws/common/assert.h>
+#include <aws/common/mutex.h>
+
+/*
+ * Small Block Allocator
+ * This is a fairly standard approach, the idea is to always allocate aligned pages of memory so that for
+ * any address you can round to the nearest page boundary to find the bookkeeping data. The idea is to reduce
+ * overhead per alloc and greatly improve runtime speed by doing as little actual allocation work as possible,
+ * preferring instead to re-use (hopefully still cached) chunks in FIFO order, or chunking up a page if there's
+ * no free chunks. When all chunks in a page are freed, the page is returned to the OS.
+ *
+ * The allocator itself is simply an array of bins, each representing a power of 2 size from 32 - N (512 tends to be
+ * a good upper bound). Thread safety is guaranteed by a mutex per bin, and locks are only necessary around the
+ * lowest level alloc and free operations.
+ *
+ * Note: this allocator gets its internal memory for data structures from the parent allocator, but does not
+ * use the parent to allocate pages. Pages are allocated directly from the OS-specific aligned malloc implementation,
+ * which allows the OS to do address re-mapping for us instead of over-allocating to fulfill alignment.
+ */
+
+#ifdef _WIN32
+# include <malloc.h>
+#elif __linux__ || __APPLE__
+# include <stdlib.h>
+#endif
+
+#if !defined(AWS_SBA_PAGE_SIZE)
+# if defined(PAGE_SIZE)
+# define AWS_SBA_PAGE_SIZE ((uintptr_t)(PAGE_SIZE))
+# else
+# define AWS_SBA_PAGE_SIZE ((uintptr_t)(4096))
+# endif
+#endif
+
+#define AWS_SBA_PAGE_MASK ((uintptr_t) ~(AWS_SBA_PAGE_SIZE - 1))
+#define AWS_SBA_TAG_VALUE 0x736f6d6570736575ULL
+
+/* list of sizes of bins, must be powers of 2, and less than AWS_SBA_PAGE_SIZE * 0.5 */
+#define AWS_SBA_BIN_COUNT 5
+static const size_t s_bin_sizes[AWS_SBA_BIN_COUNT] = {32, 64, 128, 256, 512};
+static const size_t s_max_bin_size = 512;
+
+struct sba_bin {
+ size_t size; /* size of allocs in this bin */
+ struct aws_mutex mutex; /* lock protecting this bin */
+ uint8_t *page_cursor; /* pointer to working page, currently being chunked from */
+ struct aws_array_list active_pages; /* all pages in use by this bin, could be optimized at scale by being a set */
+ struct aws_array_list free_chunks; /* free chunks available in this bin */
+};
+
+/* Header stored at the base of each page.
+ * As long as this is under 32 bytes, all is well.
+ * Above that, there's potentially more waste per page */
+struct page_header {
+ uint64_t tag; /* marker to identify/validate pages */
+ struct sba_bin *bin; /* bin this page belongs to */
+ uint32_t alloc_count; /* number of outstanding allocs from this page */
+ uint64_t tag2;
+};
+
+/* This is the impl for the aws_allocator */
+struct small_block_allocator {
+ struct aws_allocator *allocator; /* parent allocator, for large allocs */
+ struct sba_bin bins[AWS_SBA_BIN_COUNT];
+ int (*lock)(struct aws_mutex *);
+ int (*unlock)(struct aws_mutex *);
+};
+
+static int s_null_lock(struct aws_mutex *mutex) {
+ (void)mutex;
+ /* NO OP */
+ return 0;
+}
+
+static int s_null_unlock(struct aws_mutex *mutex) {
+ (void)mutex;
+ /* NO OP */
+ return 0;
+}
+
+static int s_mutex_lock(struct aws_mutex *mutex) {
+ return aws_mutex_lock(mutex);
+}
+
+static int s_mutex_unlock(struct aws_mutex *mutex) {
+ return aws_mutex_unlock(mutex);
+}
+
+static void *s_page_base(const void *addr) {
+ /* mask off the address to round it to page alignment */
+ uint8_t *page_base = (uint8_t *)(((uintptr_t)addr) & AWS_SBA_PAGE_MASK);
+ return page_base;
+}
+
+static void *s_page_bind(void *addr, struct sba_bin *bin) {
+ /* insert the header at the base of the page and advance past it */
+ struct page_header *page = (struct page_header *)addr;
+ page->tag = page->tag2 = AWS_SBA_TAG_VALUE;
+ page->bin = bin;
+ page->alloc_count = 0;
+ return (uint8_t *)addr + sizeof(struct page_header);
+}
+
+/* Wraps OS-specific aligned malloc implementation */
+static void *s_aligned_alloc(size_t size, size_t align) {
+#ifdef _WIN32
+ return _aligned_malloc(size, align);
+#else
+ void *mem = NULL;
+ int return_code = posix_memalign(&mem, align, size);
+ if (return_code) {
+ aws_raise_error(AWS_ERROR_OOM);
+ return NULL;
+ }
+ return mem;
+#endif
+}
+
+/* wraps OS-specific aligned free implementation */
+static void s_aligned_free(void *addr) {
+#ifdef _WIN32
+ _aligned_free(addr);
+#else
+ free(addr);
+#endif
+}
+
+/* aws_allocator vtable template */
+static void *s_sba_mem_acquire(struct aws_allocator *allocator, size_t size);
+static void s_sba_mem_release(struct aws_allocator *allocator, void *ptr);
+static void *s_sba_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size);
+static void *s_sba_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size);
+
+static struct aws_allocator s_sba_allocator = {
+ .mem_acquire = s_sba_mem_acquire,
+ .mem_release = s_sba_mem_release,
+ .mem_realloc = s_sba_mem_realloc,
+ .mem_calloc = s_sba_mem_calloc,
+};
+
+static int s_sba_init(struct small_block_allocator *sba, struct aws_allocator *allocator, bool multi_threaded) {
+ sba->allocator = allocator;
+ AWS_ZERO_ARRAY(sba->bins);
+ sba->lock = multi_threaded ? s_mutex_lock : s_null_lock;
+ sba->unlock = multi_threaded ? s_mutex_unlock : s_null_unlock;
+
+ for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
+ struct sba_bin *bin = &sba->bins[idx];
+ bin->size = s_bin_sizes[idx];
+ if (multi_threaded && aws_mutex_init(&bin->mutex)) {
+ goto cleanup;
+ }
+ if (aws_array_list_init_dynamic(&bin->active_pages, sba->allocator, 16, sizeof(void *))) {
+ goto cleanup;
+ }
+ /* start with enough chunks for 1 page */
+ if (aws_array_list_init_dynamic(
+ &bin->free_chunks, sba->allocator, aws_max_size(AWS_SBA_PAGE_SIZE / bin->size, 16), sizeof(void *))) {
+ goto cleanup;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+cleanup:
+ for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
+ struct sba_bin *bin = &sba->bins[idx];
+ aws_mutex_clean_up(&bin->mutex);
+ aws_array_list_clean_up(&bin->active_pages);
+ aws_array_list_clean_up(&bin->free_chunks);
+ }
+ return AWS_OP_ERR;
+}
+
+static void s_sba_clean_up(struct small_block_allocator *sba) {
+ /* free all known pages, then free the working page */
+ for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
+ struct sba_bin *bin = &sba->bins[idx];
+ for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) {
+ void *page_addr = NULL;
+ aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx);
+ s_aligned_free(page_addr);
+ }
+ if (bin->page_cursor) {
+ void *page_addr = s_page_base(bin->page_cursor);
+ s_aligned_free(page_addr);
+ }
+
+ aws_array_list_clean_up(&bin->active_pages);
+ aws_array_list_clean_up(&bin->free_chunks);
+ aws_mutex_clean_up(&bin->mutex);
+ }
+}
+
+struct aws_allocator *aws_small_block_allocator_new(struct aws_allocator *allocator, bool multi_threaded) {
+ struct small_block_allocator *sba = NULL;
+ struct aws_allocator *sba_allocator = NULL;
+ aws_mem_acquire_many(
+ allocator, 2, &sba, sizeof(struct small_block_allocator), &sba_allocator, sizeof(struct aws_allocator));
+
+ if (!sba || !sba_allocator) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*sba);
+ AWS_ZERO_STRUCT(*sba_allocator);
+
+ /* copy the template vtable */
+ *sba_allocator = s_sba_allocator;
+ sba_allocator->impl = sba;
+
+ if (s_sba_init(sba, allocator, multi_threaded)) {
+ s_sba_clean_up(sba);
+ aws_mem_release(allocator, sba);
+ return NULL;
+ }
+ return sba_allocator;
+}
+
+void aws_small_block_allocator_destroy(struct aws_allocator *sba_allocator) {
+ if (!sba_allocator) {
+ return;
+ }
+ struct small_block_allocator *sba = sba_allocator->impl;
+ if (!sba) {
+ return;
+ }
+
+ struct aws_allocator *allocator = sba->allocator;
+ s_sba_clean_up(sba);
+ aws_mem_release(allocator, sba);
+}
+
+/* NOTE: Expects the mutex to be held by the caller */
+static void *s_sba_alloc_from_bin(struct sba_bin *bin) {
+ /* check the free list, hand chunks out in FIFO order */
+ if (bin->free_chunks.length > 0) {
+ void *chunk = NULL;
+ if (aws_array_list_back(&bin->free_chunks, &chunk)) {
+ return NULL;
+ }
+ if (aws_array_list_pop_back(&bin->free_chunks)) {
+ return NULL;
+ }
+
+ AWS_ASSERT(chunk);
+ struct page_header *page = s_page_base(chunk);
+ page->alloc_count++;
+ return chunk;
+ }
+
+ /* If there is a working page to chunk from, use it */
+ if (bin->page_cursor) {
+ struct page_header *page = s_page_base(bin->page_cursor);
+ AWS_ASSERT(page);
+ size_t space_left = AWS_SBA_PAGE_SIZE - (bin->page_cursor - (uint8_t *)page);
+ if (space_left >= bin->size) {
+ void *chunk = bin->page_cursor;
+ page->alloc_count++;
+ bin->page_cursor += bin->size;
+ space_left -= bin->size;
+ if (space_left < bin->size) {
+ aws_array_list_push_back(&bin->active_pages, &page);
+ bin->page_cursor = NULL;
+ }
+ return chunk;
+ }
+ }
+
+ /* Nothing free to use, allocate a page and restart */
+ uint8_t *new_page = s_aligned_alloc(AWS_SBA_PAGE_SIZE, AWS_SBA_PAGE_SIZE);
+ new_page = s_page_bind(new_page, bin);
+ bin->page_cursor = new_page;
+ return s_sba_alloc_from_bin(bin);
+}
+
+/* NOTE: Expects the mutex to be held by the caller */
+static void s_sba_free_to_bin(struct sba_bin *bin, void *addr) {
+ AWS_PRECONDITION(addr);
+ struct page_header *page = s_page_base(addr);
+ AWS_ASSERT(page->bin == bin);
+ page->alloc_count--;
+ if (page->alloc_count == 0 && page != s_page_base(bin->page_cursor)) { /* empty page, free it */
+ uint8_t *page_start = (uint8_t *)page + sizeof(struct page_header);
+ uint8_t *page_end = page_start + AWS_SBA_PAGE_SIZE;
+ /* Remove all chunks in the page from the free list */
+ intptr_t chunk_idx = bin->free_chunks.length;
+ for (; chunk_idx >= 0; --chunk_idx) {
+ uint8_t *chunk = NULL;
+ aws_array_list_get_at(&bin->free_chunks, &chunk, chunk_idx);
+ if (chunk >= page_start && chunk < page_end) {
+ aws_array_list_swap(&bin->free_chunks, chunk_idx, bin->free_chunks.length - 1);
+ aws_array_list_pop_back(&bin->free_chunks);
+ }
+ }
+
+ /* Find page in pages list and remove it */
+ for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) {
+ void *page_addr = NULL;
+ aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx);
+ if (page_addr == page) {
+ aws_array_list_swap(&bin->active_pages, page_idx, bin->active_pages.length - 1);
+ aws_array_list_pop_back(&bin->active_pages);
+ break;
+ }
+ }
+ /* ensure that the page tag is erased, in case nearby memory is re-used */
+ page->tag = page->tag2 = 0;
+ s_aligned_free(page);
+ return;
+ }
+
+ aws_array_list_push_back(&bin->free_chunks, &addr);
+}
+
+/* No lock required for this function, it's all read-only access to constant data */
+static struct sba_bin *s_sba_find_bin(struct small_block_allocator *sba, size_t size) {
+ AWS_PRECONDITION(size <= s_max_bin_size);
+
+ /* map bits 5(32) to 9(512) to indices 0-4 */
+ size_t next_pow2 = 0;
+ aws_round_up_to_power_of_two(size, &next_pow2);
+ size_t lz = aws_clz_i32((int32_t)next_pow2);
+ size_t idx = aws_sub_size_saturating(31 - lz, 5);
+ AWS_ASSERT(idx <= 4);
+ struct sba_bin *bin = &sba->bins[idx];
+ AWS_ASSERT(bin->size >= size);
+ return bin;
+}
+
+static void *s_sba_alloc(struct small_block_allocator *sba, size_t size) {
+ if (size <= s_max_bin_size) {
+ struct sba_bin *bin = s_sba_find_bin(sba, size);
+ AWS_FATAL_ASSERT(bin);
+ /* BEGIN CRITICAL SECTION */
+ sba->lock(&bin->mutex);
+ void *mem = s_sba_alloc_from_bin(bin);
+ sba->unlock(&bin->mutex);
+ /* END CRITICAL SECTION */
+ return mem;
+ }
+ return aws_mem_acquire(sba->allocator, size);
+}
+
+static void s_sba_free(struct small_block_allocator *sba, void *addr) {
+ if (!addr) {
+ return;
+ }
+
+ struct page_header *page = (struct page_header *)s_page_base(addr);
+ /* Check to see if this page is tagged by the sba */
+ /* this check causes a read of (possibly) memory we didn't allocate, but it will always be
+ * heap memory, so should not cause any issues. TSan will see this as a data race, but it
+ * is not, that's a false positive
+ */
+ if (page->tag == AWS_SBA_TAG_VALUE && page->tag2 == AWS_SBA_TAG_VALUE) {
+ struct sba_bin *bin = page->bin;
+ /* BEGIN CRITICAL SECTION */
+ sba->lock(&bin->mutex);
+ s_sba_free_to_bin(bin, addr);
+ sba->unlock(&bin->mutex);
+ /* END CRITICAL SECTION */
+ return;
+ }
+ /* large alloc, give back to underlying allocator */
+ aws_mem_release(sba->allocator, addr);
+}
+
+static void *s_sba_mem_acquire(struct aws_allocator *allocator, size_t size) {
+ struct small_block_allocator *sba = allocator->impl;
+ return s_sba_alloc(sba, size);
+}
+
+static void s_sba_mem_release(struct aws_allocator *allocator, void *ptr) {
+ struct small_block_allocator *sba = allocator->impl;
+ s_sba_free(sba, ptr);
+}
+
+static void *s_sba_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size) {
+ struct small_block_allocator *sba = allocator->impl;
+ /* If both allocations come from the parent, let the parent do it */
+ if (old_size > s_max_bin_size && new_size > s_max_bin_size) {
+ void *ptr = old_ptr;
+ if (aws_mem_realloc(sba->allocator, &ptr, old_size, new_size)) {
+ return NULL;
+ }
+ return ptr;
+ }
+
+ if (new_size == 0) {
+ s_sba_free(sba, old_ptr);
+ return NULL;
+ }
+
+ if (old_size > new_size) {
+ return old_ptr;
+ }
+
+ void *new_mem = s_sba_alloc(sba, new_size);
+ if (old_ptr && old_size) {
+ memcpy(new_mem, old_ptr, old_size);
+ s_sba_free(sba, old_ptr);
+ }
+
+ return new_mem;
+}
+
+static void *s_sba_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
+ struct small_block_allocator *sba = allocator->impl;
+ void *mem = s_sba_alloc(sba, size * num);
+ memset(mem, 0, size * num);
+ return mem;
+}
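
The allocator_sba.c hunk above layers a small block allocator over a parent allocator. A short, hedged sketch of its intended use, assuming the public constructor and destructor are declared in <aws/common/allocator.h> (which the file itself includes):

#include <aws/common/allocator.h>

/* Hedged sketch: wrap the default allocator with a thread-safe small block allocator. */
static void example_sba_use(void) {
    struct aws_allocator *sba = aws_small_block_allocator_new(aws_default_allocator(), true /* multi_threaded */);
    if (!sba) {
        return;
    }

    /* 100 bytes falls into the 128-byte bin; the chunk is carved from an aligned page
     * whose page_header is recovered by masking the address with AWS_SBA_PAGE_MASK */
    void *small = aws_mem_acquire(sba, 100);

    /* anything above s_max_bin_size (512) is forwarded to the parent allocator */
    void *large = aws_mem_acquire(sba, 4096);

    aws_mem_release(sba, small);
    aws_mem_release(sba, large);

    aws_small_block_allocator_destroy(sba);
}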
diff --git a/contrib/restricted/aws/aws-c-common/source/arch/intel/asm/cpuid.c b/contrib/restricted/aws/aws-c-common/source/arch/intel/asm/cpuid.c
index 0797017779..d2ceab0106 100644
--- a/contrib/restricted/aws/aws-c-common/source/arch/intel/asm/cpuid.c
+++ b/contrib/restricted/aws/aws-c-common/source/arch/intel/asm/cpuid.c
@@ -1,29 +1,29 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/cpuid.h>
-
-void aws_run_cpuid(uint32_t eax, uint32_t ecx, uint32_t *abcd) {
- uint32_t ebx = 0;
- uint32_t edx = 0;
-
-#if defined(__i386__) && defined(__PIC__)
- /* in case of PIC under 32-bit EBX cannot be clobbered */
- __asm__ __volatile__("movl %%ebx, %%edi \n\t "
- "cpuid \n\t "
- "xchgl %%ebx, %%edi"
- : "=D"(ebx),
-#else
- __asm__ __volatile__("cpuid"
- : "+b"(ebx),
-#endif
- "+a"(eax),
- "+c"(ecx),
- "=d"(edx));
- abcd[0] = eax;
- abcd[1] = ebx;
- abcd[2] = ecx;
- abcd[3] = edx;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/cpuid.h>
+
+void aws_run_cpuid(uint32_t eax, uint32_t ecx, uint32_t *abcd) {
+ uint32_t ebx = 0;
+ uint32_t edx = 0;
+
+#if defined(__i386__) && defined(__PIC__)
+ /* in case of PIC under 32-bit EBX cannot be clobbered */
+ __asm__ __volatile__("movl %%ebx, %%edi \n\t "
+ "cpuid \n\t "
+ "xchgl %%ebx, %%edi"
+ : "=D"(ebx),
+#else
+ __asm__ __volatile__("cpuid"
+ : "+b"(ebx),
+#endif
+ "+a"(eax),
+ "+c"(ecx),
+ "=d"(edx));
+ abcd[0] = eax;
+ abcd[1] = ebx;
+ abcd[2] = ecx;
+ abcd[3] = edx;
+}
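
aws_run_cpuid above is a thin wrapper over the CPUID instruction that reports the registers as {EAX, EBX, ECX, EDX}. As a hedged illustration of that layout, leaf 0 can be used to read the 12-byte vendor string, which CPUID returns in EBX, EDX, ECX order:

#include <aws/common/cpuid.h>

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hedged sketch: print the CPU vendor string ("GenuineIntel", "AuthenticAMD", ...). */
static void example_print_vendor(void) {
    uint32_t abcd[4] = {0};
    char vendor[13] = {0};

    aws_run_cpuid(0 /* eax: leaf 0 */, 0 /* ecx */, abcd);

    memcpy(vendor + 0, &abcd[1], 4); /* EBX */
    memcpy(vendor + 4, &abcd[3], 4); /* EDX */
    memcpy(vendor + 8, &abcd[2], 4); /* ECX */

    printf("%s\n", vendor);
}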
diff --git a/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c b/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c
index ddd9640c62..6385c146fb 100644
--- a/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c
+++ b/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c
@@ -1,103 +1,103 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-/*
- * MSVC wants us to use the non-portable _dupenv_s instead; since we need
- * to remain portable, tell MSVC to suppress this warning.
- */
-#define _CRT_SECURE_NO_WARNINGS
-
-#include <aws/common/cpuid.h>
-#include <stdlib.h>
-
-extern void aws_run_cpuid(uint32_t eax, uint32_t ecx, uint32_t *abcd);
-
-typedef bool(has_feature_fn)(void);
-
-static bool s_has_clmul(void) {
- uint32_t abcd[4];
- uint32_t clmul_mask = 0x00000002;
- aws_run_cpuid(1, 0, abcd);
-
- if ((abcd[2] & clmul_mask) != clmul_mask)
- return false;
-
- return true;
-}
-
-static bool s_has_sse41(void) {
- uint32_t abcd[4];
- uint32_t sse41_mask = 0x00080000;
- aws_run_cpuid(1, 0, abcd);
-
- if ((abcd[2] & sse41_mask) != sse41_mask)
- return false;
-
- return true;
-}
-
-static bool s_has_sse42(void) {
- uint32_t abcd[4];
- uint32_t sse42_mask = 0x00100000;
- aws_run_cpuid(1, 0, abcd);
-
- if ((abcd[2] & sse42_mask) != sse42_mask)
- return false;
-
- return true;
-}
-
-static bool s_has_avx2(void) {
- uint32_t abcd[4];
- uint32_t avx2_bmi12_mask = (1 << 5) | (1 << 3) | (1 << 8);
- /* CPUID.(EAX=01H, ECX=0H):ECX.FMA[bit 12]==1 &&
- CPUID.(EAX=01H, ECX=0H):ECX.MOVBE[bit 22]==1 &&
- CPUID.(EAX=01H, ECX=0H):ECX.OSXSAVE[bit 27]==1 */
- aws_run_cpuid(7, 0, abcd);
-
- if ((abcd[1] & avx2_bmi12_mask) != avx2_bmi12_mask)
- return false;
-
- return true;
-}
-
-has_feature_fn *s_check_cpu_feature[AWS_CPU_FEATURE_COUNT] = {
- [AWS_CPU_FEATURE_CLMUL] = s_has_clmul,
- [AWS_CPU_FEATURE_SSE_4_1] = s_has_sse41,
- [AWS_CPU_FEATURE_SSE_4_2] = s_has_sse42,
- [AWS_CPU_FEATURE_AVX2] = s_has_avx2,
-};
-
-bool aws_cpu_has_feature(enum aws_cpu_feature_name feature_name) {
- if (s_check_cpu_feature[feature_name])
- return s_check_cpu_feature[feature_name]();
- return false;
-}
-
-#define CPUID_AVAILABLE 0
-#define CPUID_UNAVAILABLE 1
-static int cpuid_state = 2;
-
-bool aws_common_private_has_avx2(void) {
- if (AWS_LIKELY(cpuid_state == 0)) {
- return true;
- }
- if (AWS_LIKELY(cpuid_state == 1)) {
- return false;
- }
-
- /* Provide a hook for testing fallbacks and benchmarking */
- const char *env_avx2_enabled = getenv("AWS_COMMON_AVX2");
- if (env_avx2_enabled) {
- int is_enabled = atoi(env_avx2_enabled);
- cpuid_state = !is_enabled;
- return is_enabled;
- }
-
- bool available = aws_cpu_has_feature(AWS_CPU_FEATURE_AVX2);
- cpuid_state = available ? CPUID_AVAILABLE : CPUID_UNAVAILABLE;
-
- return available;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/*
+ * MSVC wants us to use the non-portable _dupenv_s instead; since we need
+ * to remain portable, tell MSVC to suppress this warning.
+ */
+#define _CRT_SECURE_NO_WARNINGS
+
+#include <aws/common/cpuid.h>
+#include <stdlib.h>
+
+extern void aws_run_cpuid(uint32_t eax, uint32_t ecx, uint32_t *abcd);
+
+typedef bool(has_feature_fn)(void);
+
+static bool s_has_clmul(void) {
+ uint32_t abcd[4];
+ uint32_t clmul_mask = 0x00000002;
+ aws_run_cpuid(1, 0, abcd);
+
+ if ((abcd[2] & clmul_mask) != clmul_mask)
+ return false;
+
+ return true;
+}
+
+static bool s_has_sse41(void) {
+ uint32_t abcd[4];
+ uint32_t sse41_mask = 0x00080000;
+ aws_run_cpuid(1, 0, abcd);
+
+ if ((abcd[2] & sse41_mask) != sse41_mask)
+ return false;
+
+ return true;
+}
+
+static bool s_has_sse42(void) {
+ uint32_t abcd[4];
+ uint32_t sse42_mask = 0x00100000;
+ aws_run_cpuid(1, 0, abcd);
+
+ if ((abcd[2] & sse42_mask) != sse42_mask)
+ return false;
+
+ return true;
+}
+
+static bool s_has_avx2(void) {
+ uint32_t abcd[4];
+ uint32_t avx2_bmi12_mask = (1 << 5) | (1 << 3) | (1 << 8);
+ /* CPUID.(EAX=01H, ECX=0H):ECX.FMA[bit 12]==1 &&
+ CPUID.(EAX=01H, ECX=0H):ECX.MOVBE[bit 22]==1 &&
+ CPUID.(EAX=01H, ECX=0H):ECX.OSXSAVE[bit 27]==1 */
+ aws_run_cpuid(7, 0, abcd);
+
+ if ((abcd[1] & avx2_bmi12_mask) != avx2_bmi12_mask)
+ return false;
+
+ return true;
+}
+
+has_feature_fn *s_check_cpu_feature[AWS_CPU_FEATURE_COUNT] = {
+ [AWS_CPU_FEATURE_CLMUL] = s_has_clmul,
+ [AWS_CPU_FEATURE_SSE_4_1] = s_has_sse41,
+ [AWS_CPU_FEATURE_SSE_4_2] = s_has_sse42,
+ [AWS_CPU_FEATURE_AVX2] = s_has_avx2,
+};
+
+bool aws_cpu_has_feature(enum aws_cpu_feature_name feature_name) {
+ if (s_check_cpu_feature[feature_name])
+ return s_check_cpu_feature[feature_name]();
+ return false;
+}
+
+#define CPUID_AVAILABLE 0
+#define CPUID_UNAVAILABLE 1
+static int cpuid_state = 2;
+
+bool aws_common_private_has_avx2(void) {
+ if (AWS_LIKELY(cpuid_state == 0)) {
+ return true;
+ }
+ if (AWS_LIKELY(cpuid_state == 1)) {
+ return false;
+ }
+
+ /* Provide a hook for testing fallbacks and benchmarking */
+ const char *env_avx2_enabled = getenv("AWS_COMMON_AVX2");
+ if (env_avx2_enabled) {
+ int is_enabled = atoi(env_avx2_enabled);
+ cpuid_state = !is_enabled;
+ return is_enabled;
+ }
+
+ bool available = aws_cpu_has_feature(AWS_CPU_FEATURE_AVX2);
+ cpuid_state = available ? CPUID_AVAILABLE : CPUID_UNAVAILABLE;
+
+ return available;
+}
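
cpuid.c above exposes the feature probes through aws_cpu_has_feature, while aws_common_private_has_avx2 caches the result and honors the AWS_COMMON_AVX2 environment variable as a test/benchmark override. A hedged dispatch sketch using only the names shown above:

#include <aws/common/cpuid.h>

#include <stdio.h>

/* Hedged sketch: pick a code path based on the detected CPU features. */
static void example_feature_dispatch(void) {
    if (aws_cpu_has_feature(AWS_CPU_FEATURE_AVX2)) {
        printf("using AVX2 code path\n");
    } else if (aws_cpu_has_feature(AWS_CPU_FEATURE_SSE_4_2)) {
        printf("using SSE4.2 code path\n");
    } else {
        printf("using portable fallback\n");
    }
}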
diff --git a/contrib/restricted/aws/aws-c-common/source/arch/intel/encoding_avx2.c b/contrib/restricted/aws/aws-c-common/source/arch/intel/encoding_avx2.c
index d4b580f24a..ebae861381 100644
--- a/contrib/restricted/aws/aws-c-common/source/arch/intel/encoding_avx2.c
+++ b/contrib/restricted/aws/aws-c-common/source/arch/intel/encoding_avx2.c
@@ -1,384 +1,384 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <emmintrin.h>
-#include <immintrin.h>
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <aws/common/common.h>
-
-/***** Decode logic *****/
-
-/*
- * Translates ranges of byte values (the result is returned, not written in place).
- * For each byte of 'in' that is between lo and hi (inclusive), the corresponding byte of the result is
- * (byte - lo + offset); all other bytes are zeroed so callers can OR partial results together.
- */
-static inline __m256i translate_range(__m256i in, uint8_t lo, uint8_t hi, uint8_t offset) {
- __m256i lovec = _mm256_set1_epi8(lo);
- __m256i hivec = _mm256_set1_epi8((char)(hi - lo));
- __m256i offsetvec = _mm256_set1_epi8(offset);
-
- __m256i tmp = _mm256_sub_epi8(in, lovec);
- /*
-     * We'll use the unsigned min operator to do our comparison, since there
-     * is no unsigned byte-compare intrinsic.
- */
- __m256i mask = _mm256_min_epu8(tmp, hivec);
- /* if mask = tmp, then keep that byte */
- mask = _mm256_cmpeq_epi8(mask, tmp);
-
- tmp = _mm256_add_epi8(tmp, offsetvec);
- tmp = _mm256_and_si256(tmp, mask);
- return tmp;
-}
-
-/*
- * For each 8-bit element of 'in' that equals 'match', sets the corresponding element of the result to 'decode'; all other elements are zero.
- */
-static inline __m256i translate_exact(__m256i in, uint8_t match, uint8_t decode) {
- __m256i mask = _mm256_cmpeq_epi8(in, _mm256_set1_epi8(match));
- return _mm256_and_si256(mask, _mm256_set1_epi8(decode));
-}
-
-/*
- * Input: a pointer to a 256-bit vector of base64 characters
- * The pointed-to-vector is replaced by a 256-bit vector of 6-bit decoded parts;
- * on decode failure, returns false, else returns true on success.
- */
-static inline bool decode_vec(__m256i *in) {
- __m256i tmp1, tmp2, tmp3;
-
- /*
- * Base64 decoding table, see RFC4648
- *
- * Note that we use multiple vector registers to try to allow the CPU to
-     * parallelize the merging ORs
- */
- tmp1 = translate_range(*in, 'A', 'Z', 0 + 1);
- tmp2 = translate_range(*in, 'a', 'z', 26 + 1);
- tmp3 = translate_range(*in, '0', '9', 52 + 1);
- tmp1 = _mm256_or_si256(tmp1, translate_exact(*in, '+', 62 + 1));
- tmp2 = _mm256_or_si256(tmp2, translate_exact(*in, '/', 63 + 1));
- tmp3 = _mm256_or_si256(tmp3, _mm256_or_si256(tmp1, tmp2));
-
- /*
- * We use 0 to mark decode failures, so everything is decoded to one higher
- * than normal. We'll shift this down now.
- */
- *in = _mm256_sub_epi8(tmp3, _mm256_set1_epi8(1));
-
- /* If any byte is now zero, we had a decode failure */
- __m256i mask = _mm256_cmpeq_epi8(tmp3, _mm256_set1_epi8(0));
- return _mm256_testz_si256(mask, mask);
-}
-
-AWS_ALIGNED_TYPEDEF(uint8_t, aligned256[32], 32);
-
-/*
- * Input: a 256-bit vector, interpreted as 32 * 6-bit values
- * Output: a 256-bit vector, the lower 24 bytes of which contain the packed version of the input
- */
-static inline __m256i pack_vec(__m256i in) {
- /*
- * Our basic strategy is to split the input vector into three vectors, for each 6-bit component
- * of each 24-bit group, shift the groups into place, then OR the vectors together. Conveniently,
- * we can do this on a (32 bit) dword-by-dword basis.
- *
- * It's important to note that we're interpreting the vector as being little-endian. That is,
- * on entry, we have dwords that look like this:
- *
- * MSB LSB
- * 00DD DDDD 00CC CCCC 00BB BBBB 00AA AAAA
- *
- * And we want to translate to:
- *
- * MSB LSB
- * 0000 0000 AAAA AABB BBBB CCCC CCDD DDDD
- *
- * After which point we can pack these dwords together to produce our final output.
- */
- __m256i maskA = _mm256_set1_epi32(0xFF); // low bits
- __m256i maskB = _mm256_set1_epi32(0xFF00);
- __m256i maskC = _mm256_set1_epi32(0xFF0000);
- __m256i maskD = _mm256_set1_epi32((int)0xFF000000);
-
- __m256i bitsA = _mm256_slli_epi32(_mm256_and_si256(in, maskA), 18);
- __m256i bitsB = _mm256_slli_epi32(_mm256_and_si256(in, maskB), 4);
- __m256i bitsC = _mm256_srli_epi32(_mm256_and_si256(in, maskC), 10);
- __m256i bitsD = _mm256_srli_epi32(_mm256_and_si256(in, maskD), 24);
-
- __m256i dwords = _mm256_or_si256(_mm256_or_si256(bitsA, bitsB), _mm256_or_si256(bitsC, bitsD));
- /*
- * Now we have a series of dwords with empty MSBs.
- * We need to pack them together (and shift down) with a shuffle operation.
- * Unfortunately the shuffle operation operates independently within each 128-bit lane,
- * so we'll need to do this in two steps: First we compact dwords within each lane, then
- * we do a dword shuffle to compact the two lanes together.
-
- * 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00 <- byte index (little endian)
- * -- 09 0a 0b -- 06 07 08 -- 03 04 05 -- 00 01 02 <- data index
- *
- * We also reverse the order of 3-byte fragments within each lane; we've constructed
- * those fragments in little endian but the order of fragments within the overall
- * vector is in memory order (big endian)
- */
- const aligned256 shufvec_buf = {
- /* clang-format off */
- /* MSB */
- 0xFF, 0xFF, 0xFF, 0xFF, /* Zero out the top 4 bytes of the lane */
- 2, 1, 0,
- 6, 5, 4,
- 10, 9, 8,
- 14, 13, 12,
-
- 0xFF, 0xFF, 0xFF, 0xFF, /* Zero out the top 4 bytes of the lane */
- 2, 1, 0,
- 6, 5, 4,
- 10, 9, 8,
- 14, 13, 12
- /* LSB */
- /* clang-format on */
- };
- __m256i shufvec = _mm256_load_si256((__m256i const *)&shufvec_buf);
-
- dwords = _mm256_shuffle_epi8(dwords, shufvec);
- /*
- * Now shuffle the 32-bit words:
- * A B C 0 D E F 0 -> 0 0 A B C D E F
- */
- __m256i shuf32 = _mm256_set_epi32(0, 0, 7, 6, 5, 3, 2, 1);
-
- dwords = _mm256_permutevar8x32_epi32(dwords, shuf32);
-
- return dwords;
-}
-
-static inline bool decode(const unsigned char *in, unsigned char *out) {
- __m256i vec = _mm256_loadu_si256((__m256i const *)in);
- if (!decode_vec(&vec)) {
- return false;
- }
- vec = pack_vec(vec);
-
- /*
-     * We'll do overlapping writes to get both the low 128 bits and the high 64 bits written.
- * Input (memory order): 0 1 2 3 4 5 - - (dwords)
- * Input (little endian) - - 5 4 3 2 1 0
- * Output in memory:
- * [0 1 2 3] [4 5]
- */
- __m128i lo = _mm256_extracti128_si256(vec, 0);
- /*
- * Unfortunately some compilers don't support _mm256_extract_epi64,
- * so we'll just copy right out of the vector as a fallback
- */
-
-#ifdef HAVE_MM256_EXTRACT_EPI64
- uint64_t hi = _mm256_extract_epi64(vec, 2);
- const uint64_t *p_hi = &hi;
-#else
- const uint64_t *p_hi = (uint64_t *)&vec + 2;
-#endif
-
- _mm_storeu_si128((__m128i *)out, lo);
- memcpy(out + 16, p_hi, sizeof(*p_hi));
-
- return true;
-}
-
-size_t aws_common_private_base64_decode_sse41(const unsigned char *in, unsigned char *out, size_t len) {
- if (len % 4) {
- return (size_t)-1;
- }
-
- size_t outlen = 0;
- while (len > 32) {
- if (!decode(in, out)) {
- return (size_t)-1;
- }
- len -= 32;
- in += 32;
- out += 24;
- outlen += 24;
- }
-
- if (len > 0) {
- unsigned char tmp_in[32];
- unsigned char tmp_out[24];
-
- memset(tmp_out, 0xEE, sizeof(tmp_out));
-
- /* We need to ensure the vector contains valid b64 characters */
- memset(tmp_in, 'A', sizeof(tmp_in));
- memcpy(tmp_in, in, len);
-
- size_t final_out = (3 * len) / 4;
-
- /* Check for end-of-string padding (up to 2 characters) */
- for (int i = 0; i < 2; i++) {
- if (tmp_in[len - 1] == '=') {
-                tmp_in[len - 1] = 'A'; /* make sure decode() doesn't reject the padding byte */
- len--;
- final_out--;
- }
- }
-
- if (!decode(tmp_in, tmp_out)) {
- return (size_t)-1;
- }
-
-    /* Check that no bits are set in the bytes past the expected output length */
- for (size_t i = final_out; i < sizeof(tmp_out); i++) {
- if (tmp_out[i]) {
- return (size_t)-1;
- }
- }
-
- memcpy(out, tmp_out, final_out);
- outlen += final_out;
- }
- return outlen;
-}
-
-/***** Encode logic *****/
-static inline __m256i encode_chars(__m256i in) {
- __m256i tmp1, tmp2, tmp3;
-
- /*
- * Base64 encoding table, see RFC4648
- *
- * We again use fan-in for the ORs here.
- */
- tmp1 = translate_range(in, 0, 25, 'A');
- tmp2 = translate_range(in, 26, 26 + 25, 'a');
- tmp3 = translate_range(in, 52, 61, '0');
- tmp1 = _mm256_or_si256(tmp1, translate_exact(in, 62, '+'));
- tmp2 = _mm256_or_si256(tmp2, translate_exact(in, 63, '/'));
-
- return _mm256_or_si256(tmp3, _mm256_or_si256(tmp1, tmp2));
-}
-
-/*
- * Input: A 256-bit vector, interpreted as 24 bytes (LSB) plus 8 bytes of high-byte padding
- * Output: A 256-bit vector of base64 characters
- */
-static inline __m256i encode_stride(__m256i vec) {
- /*
- * First, since byte-shuffle operations operate within 128-bit subvectors, swap around the dwords
- * to balance the amount of actual data between 128-bit subvectors.
- * After this we want the LE representation to look like: -- XX XX XX -- XX XX XX
- */
- __m256i shuf32 = _mm256_set_epi32(7, 5, 4, 3, 6, 2, 1, 0);
- vec = _mm256_permutevar8x32_epi32(vec, shuf32);
-
- /*
- * Next, within each group of 3 bytes, we need to byteswap into little endian form so our bitshifts
- * will work properly. We also shuffle around so that each dword has one 3-byte group, plus one byte
- * (MSB) of zero-padding.
- * Because this is a byte-shuffle, indexes are within each 128-bit subvector.
- *
- * -- -- -- -- 11 10 09 08 07 06 05 04 03 02 01 00
- */
-
- const aligned256 shufvec_buf = {
- /* clang-format off */
- /* MSB */
- 2, 1, 0, 0xFF,
- 5, 4, 3, 0xFF,
- 8, 7, 6, 0xFF,
- 11, 10, 9, 0xFF,
-
- 2, 1, 0, 0xFF,
- 5, 4, 3, 0xFF,
- 8, 7, 6, 0xFF,
- 11, 10, 9, 0xFF
- /* LSB */
- /* clang-format on */
- };
- vec = _mm256_shuffle_epi8(vec, _mm256_load_si256((__m256i const *)&shufvec_buf));
-
- /*
- * Now shift and mask to split out 6-bit groups.
- * We'll also do a second byteswap to get back into big-endian
- */
- __m256i mask0 = _mm256_set1_epi32(0x3F);
- __m256i mask1 = _mm256_set1_epi32(0x3F << 6);
- __m256i mask2 = _mm256_set1_epi32(0x3F << 12);
- __m256i mask3 = _mm256_set1_epi32(0x3F << 18);
-
- __m256i digit0 = _mm256_and_si256(mask0, vec);
- __m256i digit1 = _mm256_and_si256(mask1, vec);
- __m256i digit2 = _mm256_and_si256(mask2, vec);
- __m256i digit3 = _mm256_and_si256(mask3, vec);
-
- /*
- * Because we want to byteswap, the low-order digit0 goes into the
- * high-order byte
- */
- digit0 = _mm256_slli_epi32(digit0, 24);
- digit1 = _mm256_slli_epi32(digit1, 10);
- digit2 = _mm256_srli_epi32(digit2, 4);
- digit3 = _mm256_srli_epi32(digit3, 18);
-
- vec = _mm256_or_si256(_mm256_or_si256(digit0, digit1), _mm256_or_si256(digit2, digit3));
-
- /* Finally translate to the base64 character set */
- return encode_chars(vec);
-}
-
-void aws_common_private_base64_encode_sse41(const uint8_t *input, uint8_t *output, size_t inlen) {
- __m256i instride, outstride;
-
- while (inlen >= 32) {
- /*
- * Where possible, we'll load a full vector at a time and ignore the over-read.
- * However, if we have < 32 bytes left, this would result in a potential read
- * of unreadable pages, so we use bounce buffers below.
- */
- instride = _mm256_loadu_si256((__m256i const *)input);
- outstride = encode_stride(instride);
- _mm256_storeu_si256((__m256i *)output, outstride);
-
- input += 24;
- output += 32;
- inlen -= 24;
- }
-
- while (inlen) {
- /*
- * We need to go through a bounce buffer for anything remaining, as we
- * don't want to over-read or over-write the ends of the buffers.
- */
- size_t stridelen = inlen > 24 ? 24 : inlen;
- size_t outlen = ((stridelen + 2) / 3) * 4;
-
- memset(&instride, 0, sizeof(instride));
- memcpy(&instride, input, stridelen);
-
- outstride = encode_stride(instride);
- memcpy(output, &outstride, outlen);
-
- if (inlen < 24) {
- if (inlen % 3 >= 1) {
- /* AA== or AAA= */
- output[outlen - 1] = '=';
- }
- if (inlen % 3 == 1) {
- /* AA== */
- output[outlen - 2] = '=';
- }
-
- return;
- }
-
- input += stridelen;
- output += outlen;
- inlen -= stridelen;
- }
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <emmintrin.h>
+#include <immintrin.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <aws/common/common.h>
+
+/***** Decode logic *****/
+
+/*
+ * Translates ranges of byte values (the result is returned, not written in place).
+ * For each byte of 'in' that is between lo and hi (inclusive), the corresponding byte of the result is
+ * (byte - lo + offset); all other bytes are zeroed so callers can OR partial results together.
+ */
+static inline __m256i translate_range(__m256i in, uint8_t lo, uint8_t hi, uint8_t offset) {
+ __m256i lovec = _mm256_set1_epi8(lo);
+ __m256i hivec = _mm256_set1_epi8((char)(hi - lo));
+ __m256i offsetvec = _mm256_set1_epi8(offset);
+
+ __m256i tmp = _mm256_sub_epi8(in, lovec);
+ /*
+     * We'll use the unsigned min operator to do our comparison, since there
+     * is no unsigned byte-compare intrinsic.
+ */
+ __m256i mask = _mm256_min_epu8(tmp, hivec);
+ /* if mask = tmp, then keep that byte */
+ mask = _mm256_cmpeq_epi8(mask, tmp);
+
+ tmp = _mm256_add_epi8(tmp, offsetvec);
+ tmp = _mm256_and_si256(tmp, mask);
+ return tmp;
+}
+
+/*
+ * For each 8-bit element of 'in' that equals 'match', sets the corresponding element of the result to 'decode'; all other elements are zero.
+ */
+static inline __m256i translate_exact(__m256i in, uint8_t match, uint8_t decode) {
+ __m256i mask = _mm256_cmpeq_epi8(in, _mm256_set1_epi8(match));
+ return _mm256_and_si256(mask, _mm256_set1_epi8(decode));
+}
+
+/*
+ * Input: a pointer to a 256-bit vector of base64 characters
+ * The pointed-to-vector is replaced by a 256-bit vector of 6-bit decoded parts;
+ * on decode failure, returns false, else returns true on success.
+ */
+static inline bool decode_vec(__m256i *in) {
+ __m256i tmp1, tmp2, tmp3;
+
+ /*
+ * Base64 decoding table, see RFC4648
+ *
+ * Note that we use multiple vector registers to try to allow the CPU to
+     * parallelize the merging ORs
+ */
+ tmp1 = translate_range(*in, 'A', 'Z', 0 + 1);
+ tmp2 = translate_range(*in, 'a', 'z', 26 + 1);
+ tmp3 = translate_range(*in, '0', '9', 52 + 1);
+ tmp1 = _mm256_or_si256(tmp1, translate_exact(*in, '+', 62 + 1));
+ tmp2 = _mm256_or_si256(tmp2, translate_exact(*in, '/', 63 + 1));
+ tmp3 = _mm256_or_si256(tmp3, _mm256_or_si256(tmp1, tmp2));
+
+ /*
+ * We use 0 to mark decode failures, so everything is decoded to one higher
+ * than normal. We'll shift this down now.
+ */
+ *in = _mm256_sub_epi8(tmp3, _mm256_set1_epi8(1));
+
+ /* If any byte is now zero, we had a decode failure */
+ __m256i mask = _mm256_cmpeq_epi8(tmp3, _mm256_set1_epi8(0));
+ return _mm256_testz_si256(mask, mask);
+}
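The range/exact translations above are easier to follow in scalar form: every valid base64 character decodes to its 6-bit value plus one, anything else becomes zero, and the subtract-by-one restores the true value while the cmpeq/testz pair flags any zero (invalid) lane. A per-byte sketch of the same mapping, for illustration only (the vector code handles 32 bytes at once):

    /* Returns the decoded 6-bit value + 1, or 0 for an invalid base64 character,
     * mirroring the offset-by-one scheme used by decode_vec() above. */
    static unsigned char s_decode_one_plus1(unsigned char c) {
        if (c >= 'A' && c <= 'Z') { return (unsigned char)(c - 'A' + 0 + 1); }
        if (c >= 'a' && c <= 'z') { return (unsigned char)(c - 'a' + 26 + 1); }
        if (c >= '0' && c <= '9') { return (unsigned char)(c - '0' + 52 + 1); }
        if (c == '+') { return 62 + 1; }
        if (c == '/') { return 63 + 1; }
        return 0; /* corresponds to a zeroed lane, i.e. a decode failure */
    }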
+
+AWS_ALIGNED_TYPEDEF(uint8_t, aligned256[32], 32);
+
+/*
+ * Input: a 256-bit vector, interpreted as 32 * 6-bit values
+ * Output: a 256-bit vector, the lower 24 bytes of which contain the packed version of the input
+ */
+static inline __m256i pack_vec(__m256i in) {
+ /*
+ * Our basic strategy is to split the input vector into three vectors, for each 6-bit component
+ * of each 24-bit group, shift the groups into place, then OR the vectors together. Conveniently,
+ * we can do this on a (32 bit) dword-by-dword basis.
+ *
+ * It's important to note that we're interpreting the vector as being little-endian. That is,
+ * on entry, we have dwords that look like this:
+ *
+ * MSB LSB
+ * 00DD DDDD 00CC CCCC 00BB BBBB 00AA AAAA
+ *
+ * And we want to translate to:
+ *
+ * MSB LSB
+ * 0000 0000 AAAA AABB BBBB CCCC CCDD DDDD
+ *
+ * After which point we can pack these dwords together to produce our final output.
+ */
+ __m256i maskA = _mm256_set1_epi32(0xFF); // low bits
+ __m256i maskB = _mm256_set1_epi32(0xFF00);
+ __m256i maskC = _mm256_set1_epi32(0xFF0000);
+ __m256i maskD = _mm256_set1_epi32((int)0xFF000000);
+
+ __m256i bitsA = _mm256_slli_epi32(_mm256_and_si256(in, maskA), 18);
+ __m256i bitsB = _mm256_slli_epi32(_mm256_and_si256(in, maskB), 4);
+ __m256i bitsC = _mm256_srli_epi32(_mm256_and_si256(in, maskC), 10);
+ __m256i bitsD = _mm256_srli_epi32(_mm256_and_si256(in, maskD), 24);
+
+ __m256i dwords = _mm256_or_si256(_mm256_or_si256(bitsA, bitsB), _mm256_or_si256(bitsC, bitsD));
+ /*
+ * Now we have a series of dwords with empty MSBs.
+ * We need to pack them together (and shift down) with a shuffle operation.
+ * Unfortunately the shuffle operation operates independently within each 128-bit lane,
+ * so we'll need to do this in two steps: First we compact dwords within each lane, then
+ * we do a dword shuffle to compact the two lanes together.
+
+ * 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00 <- byte index (little endian)
+ * -- 09 0a 0b -- 06 07 08 -- 03 04 05 -- 00 01 02 <- data index
+ *
+ * We also reverse the order of 3-byte fragments within each lane; we've constructed
+ * those fragments in little endian but the order of fragments within the overall
+ * vector is in memory order (big endian)
+ */
+ const aligned256 shufvec_buf = {
+ /* clang-format off */
+ /* MSB */
+ 0xFF, 0xFF, 0xFF, 0xFF, /* Zero out the top 4 bytes of the lane */
+ 2, 1, 0,
+ 6, 5, 4,
+ 10, 9, 8,
+ 14, 13, 12,
+
+ 0xFF, 0xFF, 0xFF, 0xFF, /* Zero out the top 4 bytes of the lane */
+ 2, 1, 0,
+ 6, 5, 4,
+ 10, 9, 8,
+ 14, 13, 12
+ /* LSB */
+ /* clang-format on */
+ };
+ __m256i shufvec = _mm256_load_si256((__m256i const *)&shufvec_buf);
+
+ dwords = _mm256_shuffle_epi8(dwords, shufvec);
+ /*
+ * Now shuffle the 32-bit words:
+ * A B C 0 D E F 0 -> 0 0 A B C D E F
+ */
+ __m256i shuf32 = _mm256_set_epi32(0, 0, 7, 6, 5, 3, 2, 1);
+
+ dwords = _mm256_permutevar8x32_epi32(dwords, shuf32);
+
+ return dwords;
+}
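Per 32-bit dword, the four mask/shift pairs above amount to the following scalar packing, where the byte at the lowest address holds the most significant 6-bit digit of the 24-bit group; a rough scalar equivalent for a single dword (illustrative only):

    #include <stdint.h>

    /* Packs four 6-bit values (one per byte of 'dword', lowest address first)
     * into a single 24-bit group, matching the 18/12/6/0 bit positions above. */
    static uint32_t s_pack_one_dword(uint32_t dword) {
        uint32_t a = (dword >> 0) & 0x3F;
        uint32_t b = (dword >> 8) & 0x3F;
        uint32_t c = (dword >> 16) & 0x3F;
        uint32_t d = (dword >> 24) & 0x3F;
        return (a << 18) | (b << 12) | (c << 6) | d;
    }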
+
+static inline bool decode(const unsigned char *in, unsigned char *out) {
+ __m256i vec = _mm256_loadu_si256((__m256i const *)in);
+ if (!decode_vec(&vec)) {
+ return false;
+ }
+ vec = pack_vec(vec);
+
+ /*
+     * We'll do overlapping writes to get both the low 128 bits and the high 64 bits written.
+ * Input (memory order): 0 1 2 3 4 5 - - (dwords)
+ * Input (little endian) - - 5 4 3 2 1 0
+ * Output in memory:
+ * [0 1 2 3] [4 5]
+ */
+ __m128i lo = _mm256_extracti128_si256(vec, 0);
+ /*
+ * Unfortunately some compilers don't support _mm256_extract_epi64,
+ * so we'll just copy right out of the vector as a fallback
+ */
+
+#ifdef HAVE_MM256_EXTRACT_EPI64
+ uint64_t hi = _mm256_extract_epi64(vec, 2);
+ const uint64_t *p_hi = &hi;
+#else
+ const uint64_t *p_hi = (uint64_t *)&vec + 2;
+#endif
+
+ _mm_storeu_si128((__m128i *)out, lo);
+ memcpy(out + 16, p_hi, sizeof(*p_hi));
+
+ return true;
+}
+
+size_t aws_common_private_base64_decode_sse41(const unsigned char *in, unsigned char *out, size_t len) {
+ if (len % 4) {
+ return (size_t)-1;
+ }
+
+ size_t outlen = 0;
+ while (len > 32) {
+ if (!decode(in, out)) {
+ return (size_t)-1;
+ }
+ len -= 32;
+ in += 32;
+ out += 24;
+ outlen += 24;
+ }
+
+ if (len > 0) {
+ unsigned char tmp_in[32];
+ unsigned char tmp_out[24];
+
+ memset(tmp_out, 0xEE, sizeof(tmp_out));
+
+ /* We need to ensure the vector contains valid b64 characters */
+ memset(tmp_in, 'A', sizeof(tmp_in));
+ memcpy(tmp_in, in, len);
+
+ size_t final_out = (3 * len) / 4;
+
+ /* Check for end-of-string padding (up to 2 characters) */
+ for (int i = 0; i < 2; i++) {
+ if (tmp_in[len - 1] == '=') {
+                tmp_in[len - 1] = 'A'; /* make sure decode() doesn't reject the padding byte */
+ len--;
+ final_out--;
+ }
+ }
+
+ if (!decode(tmp_in, tmp_out)) {
+ return (size_t)-1;
+ }
+
+    /* Check that no bits are set in the bytes past the expected output length */
+ for (size_t i = final_out; i < sizeof(tmp_out); i++) {
+ if (tmp_out[i]) {
+ return (size_t)-1;
+ }
+ }
+
+ memcpy(out, tmp_out, final_out);
+ outlen += final_out;
+ }
+ return outlen;
+}
+
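A brief usage sketch for the decoder above. It is hedged: it assumes the private header declaring aws_common_private_base64_decode_sse41 is included, that the caller has already verified AVX2 support, and that `encoded` is padded base64 whose length is a multiple of 4 (the 3/4 sizing mirrors the tail handling above); the helper name is illustrative:

    #include <stdlib.h>
    #include <string.h>

    static unsigned char *s_decode_example(const char *encoded, size_t *out_len) {
        size_t in_len = strlen(encoded); /* must be a multiple of 4 */
        unsigned char *decoded = malloc((in_len / 4) * 3);
        if (!decoded) { return NULL; }
        *out_len = aws_common_private_base64_decode_sse41((const unsigned char *)encoded, decoded, in_len);
        if (*out_len == (size_t)-1) { /* not valid base64 */
            free(decoded);
            return NULL;
        }
        return decoded;
    }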
+/***** Encode logic *****/
+static inline __m256i encode_chars(__m256i in) {
+ __m256i tmp1, tmp2, tmp3;
+
+ /*
+ * Base64 encoding table, see RFC4648
+ *
+ * We again use fan-in for the ORs here.
+ */
+ tmp1 = translate_range(in, 0, 25, 'A');
+ tmp2 = translate_range(in, 26, 26 + 25, 'a');
+ tmp3 = translate_range(in, 52, 61, '0');
+ tmp1 = _mm256_or_si256(tmp1, translate_exact(in, 62, '+'));
+ tmp2 = _mm256_or_si256(tmp2, translate_exact(in, 63, '/'));
+
+ return _mm256_or_si256(tmp3, _mm256_or_si256(tmp1, tmp2));
+}
+
+/*
+ * Input: A 256-bit vector, interpreted as 24 bytes (LSB) plus 8 bytes of high-byte padding
+ * Output: A 256-bit vector of base64 characters
+ */
+static inline __m256i encode_stride(__m256i vec) {
+ /*
+ * First, since byte-shuffle operations operate within 128-bit subvectors, swap around the dwords
+ * to balance the amount of actual data between 128-bit subvectors.
+ * After this we want the LE representation to look like: -- XX XX XX -- XX XX XX
+ */
+ __m256i shuf32 = _mm256_set_epi32(7, 5, 4, 3, 6, 2, 1, 0);
+ vec = _mm256_permutevar8x32_epi32(vec, shuf32);
+
+ /*
+ * Next, within each group of 3 bytes, we need to byteswap into little endian form so our bitshifts
+ * will work properly. We also shuffle around so that each dword has one 3-byte group, plus one byte
+ * (MSB) of zero-padding.
+ * Because this is a byte-shuffle, indexes are within each 128-bit subvector.
+ *
+ * -- -- -- -- 11 10 09 08 07 06 05 04 03 02 01 00
+ */
+
+ const aligned256 shufvec_buf = {
+ /* clang-format off */
+ /* MSB */
+ 2, 1, 0, 0xFF,
+ 5, 4, 3, 0xFF,
+ 8, 7, 6, 0xFF,
+ 11, 10, 9, 0xFF,
+
+ 2, 1, 0, 0xFF,
+ 5, 4, 3, 0xFF,
+ 8, 7, 6, 0xFF,
+ 11, 10, 9, 0xFF
+ /* LSB */
+ /* clang-format on */
+ };
+ vec = _mm256_shuffle_epi8(vec, _mm256_load_si256((__m256i const *)&shufvec_buf));
+
+ /*
+ * Now shift and mask to split out 6-bit groups.
+ * We'll also do a second byteswap to get back into big-endian
+ */
+ __m256i mask0 = _mm256_set1_epi32(0x3F);
+ __m256i mask1 = _mm256_set1_epi32(0x3F << 6);
+ __m256i mask2 = _mm256_set1_epi32(0x3F << 12);
+ __m256i mask3 = _mm256_set1_epi32(0x3F << 18);
+
+ __m256i digit0 = _mm256_and_si256(mask0, vec);
+ __m256i digit1 = _mm256_and_si256(mask1, vec);
+ __m256i digit2 = _mm256_and_si256(mask2, vec);
+ __m256i digit3 = _mm256_and_si256(mask3, vec);
+
+ /*
+ * Because we want to byteswap, the low-order digit0 goes into the
+ * high-order byte
+ */
+ digit0 = _mm256_slli_epi32(digit0, 24);
+ digit1 = _mm256_slli_epi32(digit1, 10);
+ digit2 = _mm256_srli_epi32(digit2, 4);
+ digit3 = _mm256_srli_epi32(digit3, 18);
+
+ vec = _mm256_or_si256(_mm256_or_si256(digit0, digit1), _mm256_or_si256(digit2, digit3));
+
+ /* Finally translate to the base64 character set */
+ return encode_chars(vec);
+}
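The mask/shift/byteswap sequence above extracts, for each 24-bit group, the same four 6-bit digits that a scalar encoder produces; a per-group sketch with the bytes taken in stream order (illustrative only):

    #include <stdint.h>

    /* Splits 3 input bytes into 4 base64 digit indexes (0..63), in output order. */
    static void s_split_group(const uint8_t in[3], uint8_t digits[4]) {
        digits[0] = in[0] >> 2;
        digits[1] = (uint8_t)(((in[0] & 0x03) << 4) | (in[1] >> 4));
        digits[2] = (uint8_t)(((in[1] & 0x0F) << 2) | (in[2] >> 6));
        digits[3] = in[2] & 0x3F;
    }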
+
+void aws_common_private_base64_encode_sse41(const uint8_t *input, uint8_t *output, size_t inlen) {
+ __m256i instride, outstride;
+
+ while (inlen >= 32) {
+ /*
+ * Where possible, we'll load a full vector at a time and ignore the over-read.
+ * However, if we have < 32 bytes left, this would result in a potential read
+ * of unreadable pages, so we use bounce buffers below.
+ */
+ instride = _mm256_loadu_si256((__m256i const *)input);
+ outstride = encode_stride(instride);
+ _mm256_storeu_si256((__m256i *)output, outstride);
+
+ input += 24;
+ output += 32;
+ inlen -= 24;
+ }
+
+ while (inlen) {
+ /*
+ * We need to go through a bounce buffer for anything remaining, as we
+ * don't want to over-read or over-write the ends of the buffers.
+ */
+ size_t stridelen = inlen > 24 ? 24 : inlen;
+ size_t outlen = ((stridelen + 2) / 3) * 4;
+
+ memset(&instride, 0, sizeof(instride));
+ memcpy(&instride, input, stridelen);
+
+ outstride = encode_stride(instride);
+ memcpy(output, &outstride, outlen);
+
+ if (inlen < 24) {
+ if (inlen % 3 >= 1) {
+ /* AA== or AAA= */
+ output[outlen - 1] = '=';
+ }
+ if (inlen % 3 == 1) {
+ /* AA== */
+ output[outlen - 2] = '=';
+ }
+
+ return;
+ }
+
+ input += stridelen;
+ output += outlen;
+ inlen -= stridelen;
+ }
+}
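For completeness, a hedged calling sketch for the encoder: the output needs 4 bytes for every 3 input bytes, rounded up, and the routine writes the '=' padding itself but does not NUL-terminate. The declaration is assumed to come from the library's private encoding header, and the helper name is illustrative:

    #include <stdint.h>
    #include <stdlib.h>

    static uint8_t *s_encode_example(const uint8_t *data, size_t len, size_t *out_len) {
        *out_len = ((len + 2) / 3) * 4; /* 4 output chars per 3 input bytes, rounded up */
        uint8_t *encoded = malloc(*out_len + 1);
        if (!encoded) { return NULL; }
        aws_common_private_base64_encode_sse41(data, encoded, len);
        encoded[*out_len] = '\0'; /* terminator added here; the kernel does not do it */
        return encoded;
    }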
diff --git a/contrib/restricted/aws/aws-c-common/source/array_list.c b/contrib/restricted/aws/aws-c-common/source/array_list.c
index 9d2e906159..7e05636a75 100644
--- a/contrib/restricted/aws/aws-c-common/source/array_list.c
+++ b/contrib/restricted/aws/aws-c-common/source/array_list.c
@@ -1,29 +1,29 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/array_list.h>
-#include <aws/common/private/array_list.h>
+#include <aws/common/private/array_list.h>
#include <stdlib.h> /* qsort */
-int aws_array_list_calc_necessary_size(struct aws_array_list *AWS_RESTRICT list, size_t index, size_t *necessary_size) {
- AWS_PRECONDITION(aws_array_list_is_valid(list));
- size_t index_inc;
- if (aws_add_size_checked(index, 1, &index_inc)) {
- AWS_POSTCONDITION(aws_array_list_is_valid(list));
- return AWS_OP_ERR;
- }
-
- if (aws_mul_size_checked(index_inc, list->item_size, necessary_size)) {
- AWS_POSTCONDITION(aws_array_list_is_valid(list));
- return AWS_OP_ERR;
- }
- AWS_POSTCONDITION(aws_array_list_is_valid(list));
- return AWS_OP_SUCCESS;
-}
-
+int aws_array_list_calc_necessary_size(struct aws_array_list *AWS_RESTRICT list, size_t index, size_t *necessary_size) {
+ AWS_PRECONDITION(aws_array_list_is_valid(list));
+ size_t index_inc;
+ if (aws_add_size_checked(index, 1, &index_inc)) {
+ AWS_POSTCONDITION(aws_array_list_is_valid(list));
+ return AWS_OP_ERR;
+ }
+
+ if (aws_mul_size_checked(index_inc, list->item_size, necessary_size)) {
+ AWS_POSTCONDITION(aws_array_list_is_valid(list));
+ return AWS_OP_ERR;
+ }
+ AWS_POSTCONDITION(aws_array_list_is_valid(list));
+ return AWS_OP_SUCCESS;
+}
+
int aws_array_list_shrink_to_fit(struct aws_array_list *AWS_RESTRICT list) {
AWS_PRECONDITION(aws_array_list_is_valid(list));
if (list->alloc) {
@@ -58,8 +58,8 @@ int aws_array_list_shrink_to_fit(struct aws_array_list *AWS_RESTRICT list) {
}
int aws_array_list_copy(const struct aws_array_list *AWS_RESTRICT from, struct aws_array_list *AWS_RESTRICT to) {
- AWS_FATAL_PRECONDITION(from->item_size == to->item_size);
- AWS_FATAL_PRECONDITION(from->data);
+ AWS_FATAL_PRECONDITION(from->item_size == to->item_size);
+ AWS_FATAL_PRECONDITION(from->data);
AWS_PRECONDITION(aws_array_list_is_valid(from));
AWS_PRECONDITION(aws_array_list_is_valid(to));
@@ -169,8 +169,8 @@ int aws_array_list_ensure_capacity(struct aws_array_list *AWS_RESTRICT list, siz
static void aws_array_list_mem_swap(void *AWS_RESTRICT item1, void *AWS_RESTRICT item2, size_t item_size) {
enum { SLICE = 128 };
- AWS_FATAL_PRECONDITION(item1);
- AWS_FATAL_PRECONDITION(item2);
+ AWS_FATAL_PRECONDITION(item1);
+ AWS_FATAL_PRECONDITION(item2);
/* copy SLICE sized bytes at a time */
size_t slice_count = item_size / SLICE;
@@ -190,8 +190,8 @@ static void aws_array_list_mem_swap(void *AWS_RESTRICT item1, void *AWS_RESTRICT
}
void aws_array_list_swap(struct aws_array_list *AWS_RESTRICT list, size_t a, size_t b) {
- AWS_FATAL_PRECONDITION(a < list->length);
- AWS_FATAL_PRECONDITION(b < list->length);
+ AWS_FATAL_PRECONDITION(a < list->length);
+ AWS_FATAL_PRECONDITION(b < list->length);
AWS_PRECONDITION(aws_array_list_is_valid(list));
if (a == b) {
diff --git a/contrib/restricted/aws/aws-c-common/source/assert.c b/contrib/restricted/aws/aws-c-common/source/assert.c
index 0a3ef8de16..9aaae9a19e 100644
--- a/contrib/restricted/aws/aws-c-common/source/assert.c
+++ b/contrib/restricted/aws/aws-c-common/source/assert.c
@@ -1,12 +1,12 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
-#include <aws/common/system_info.h>
-
+#include <aws/common/system_info.h>
+
#include <stdio.h>
#include <stdlib.h>
diff --git a/contrib/restricted/aws/aws-c-common/source/byte_buf.c b/contrib/restricted/aws/aws-c-common/source/byte_buf.c
index d07a1e9b38..ca18f4121b 100644
--- a/contrib/restricted/aws/aws-c-common/source/byte_buf.c
+++ b/contrib/restricted/aws/aws-c-common/source/byte_buf.c
@@ -1,10 +1,10 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/byte_buf.h>
-#include <aws/common/private/byte_buf.h>
+#include <aws/common/private/byte_buf.h>
#include <stdarg.h>
@@ -15,12 +15,12 @@
#endif
int aws_byte_buf_init(struct aws_byte_buf *buf, struct aws_allocator *allocator, size_t capacity) {
- AWS_PRECONDITION(buf);
- AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(buf);
+ AWS_PRECONDITION(allocator);
buf->buffer = (capacity == 0) ? NULL : aws_mem_acquire(allocator, capacity);
if (capacity != 0 && buf->buffer == NULL) {
- AWS_ZERO_STRUCT(*buf);
+ AWS_ZERO_STRUCT(*buf);
return AWS_OP_ERR;
}
@@ -32,9 +32,9 @@ int aws_byte_buf_init(struct aws_byte_buf *buf, struct aws_allocator *allocator,
}
int aws_byte_buf_init_copy(struct aws_byte_buf *dest, struct aws_allocator *allocator, const struct aws_byte_buf *src) {
- AWS_PRECONDITION(allocator);
- AWS_PRECONDITION(dest);
- AWS_ERROR_PRECONDITION(aws_byte_buf_is_valid(src));
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(dest);
+ AWS_ERROR_PRECONDITION(aws_byte_buf_is_valid(src));
if (!src->buffer) {
AWS_ZERO_STRUCT(*dest);
@@ -56,23 +56,23 @@ int aws_byte_buf_init_copy(struct aws_byte_buf *dest, struct aws_allocator *allo
}
bool aws_byte_buf_is_valid(const struct aws_byte_buf *const buf) {
- return buf != NULL &&
- ((buf->capacity == 0 && buf->len == 0 && buf->buffer == NULL) ||
- (buf->capacity > 0 && buf->len <= buf->capacity && AWS_MEM_IS_WRITABLE(buf->buffer, buf->capacity)));
+ return buf != NULL &&
+ ((buf->capacity == 0 && buf->len == 0 && buf->buffer == NULL) ||
+ (buf->capacity > 0 && buf->len <= buf->capacity && AWS_MEM_IS_WRITABLE(buf->buffer, buf->capacity)));
}
bool aws_byte_cursor_is_valid(const struct aws_byte_cursor *cursor) {
- return cursor != NULL &&
- ((cursor->len == 0) || (cursor->len > 0 && cursor->ptr && AWS_MEM_IS_READABLE(cursor->ptr, cursor->len)));
+ return cursor != NULL &&
+ ((cursor->len == 0) || (cursor->len > 0 && cursor->ptr && AWS_MEM_IS_READABLE(cursor->ptr, cursor->len)));
+}
+
+void aws_byte_buf_reset(struct aws_byte_buf *buf, bool zero_contents) {
+ if (zero_contents) {
+ aws_byte_buf_secure_zero(buf);
+ }
+ buf->len = 0;
}
-void aws_byte_buf_reset(struct aws_byte_buf *buf, bool zero_contents) {
- if (zero_contents) {
- aws_byte_buf_secure_zero(buf);
- }
- buf->len = 0;
-}
-
void aws_byte_buf_clean_up(struct aws_byte_buf *buf) {
AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
if (buf->allocator && buf->buffer) {
@@ -94,53 +94,53 @@ void aws_byte_buf_secure_zero(struct aws_byte_buf *buf) {
}
void aws_byte_buf_clean_up_secure(struct aws_byte_buf *buf) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
aws_byte_buf_secure_zero(buf);
aws_byte_buf_clean_up(buf);
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
}
-bool aws_byte_buf_eq(const struct aws_byte_buf *const a, const struct aws_byte_buf *const b) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(a));
- AWS_PRECONDITION(aws_byte_buf_is_valid(b));
- bool rval = aws_array_eq(a->buffer, a->len, b->buffer, b->len);
- AWS_POSTCONDITION(aws_byte_buf_is_valid(a));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(b));
- return rval;
+bool aws_byte_buf_eq(const struct aws_byte_buf *const a, const struct aws_byte_buf *const b) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(a));
+ AWS_PRECONDITION(aws_byte_buf_is_valid(b));
+ bool rval = aws_array_eq(a->buffer, a->len, b->buffer, b->len);
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(a));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(b));
+ return rval;
}
-bool aws_byte_buf_eq_ignore_case(const struct aws_byte_buf *const a, const struct aws_byte_buf *const b) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(a));
- AWS_PRECONDITION(aws_byte_buf_is_valid(b));
- bool rval = aws_array_eq_ignore_case(a->buffer, a->len, b->buffer, b->len);
- AWS_POSTCONDITION(aws_byte_buf_is_valid(a));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(b));
- return rval;
+bool aws_byte_buf_eq_ignore_case(const struct aws_byte_buf *const a, const struct aws_byte_buf *const b) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(a));
+ AWS_PRECONDITION(aws_byte_buf_is_valid(b));
+ bool rval = aws_array_eq_ignore_case(a->buffer, a->len, b->buffer, b->len);
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(a));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(b));
+ return rval;
}
-bool aws_byte_buf_eq_c_str(const struct aws_byte_buf *const buf, const char *const c_str) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- AWS_PRECONDITION(c_str != NULL);
- bool rval = aws_array_eq_c_str(buf->buffer, buf->len, c_str);
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
- return rval;
+bool aws_byte_buf_eq_c_str(const struct aws_byte_buf *const buf, const char *const c_str) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ AWS_PRECONDITION(c_str != NULL);
+ bool rval = aws_array_eq_c_str(buf->buffer, buf->len, c_str);
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
+ return rval;
}
-bool aws_byte_buf_eq_c_str_ignore_case(const struct aws_byte_buf *const buf, const char *const c_str) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- AWS_PRECONDITION(c_str != NULL);
- bool rval = aws_array_eq_c_str_ignore_case(buf->buffer, buf->len, c_str);
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
- return rval;
+bool aws_byte_buf_eq_c_str_ignore_case(const struct aws_byte_buf *const buf, const char *const c_str) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ AWS_PRECONDITION(c_str != NULL);
+ bool rval = aws_array_eq_c_str_ignore_case(buf->buffer, buf->len, c_str);
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
+ return rval;
}
int aws_byte_buf_init_copy_from_cursor(
struct aws_byte_buf *dest,
struct aws_allocator *allocator,
struct aws_byte_cursor src) {
- AWS_PRECONDITION(allocator);
- AWS_PRECONDITION(dest);
- AWS_ERROR_PRECONDITION(aws_byte_cursor_is_valid(&src));
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(dest);
+ AWS_ERROR_PRECONDITION(aws_byte_cursor_is_valid(&src));
AWS_ZERO_STRUCT(*dest);
@@ -159,89 +159,89 @@ int aws_byte_buf_init_copy_from_cursor(
return AWS_OP_SUCCESS;
}
-int aws_byte_buf_init_cache_and_update_cursors(struct aws_byte_buf *dest, struct aws_allocator *allocator, ...) {
- AWS_PRECONDITION(allocator);
- AWS_PRECONDITION(dest);
-
- AWS_ZERO_STRUCT(*dest);
-
- size_t total_len = 0;
- va_list args;
- va_start(args, allocator);
-
- /* Loop until final NULL arg is encountered */
- struct aws_byte_cursor *cursor_i;
- while ((cursor_i = va_arg(args, struct aws_byte_cursor *)) != NULL) {
- AWS_ASSERT(aws_byte_cursor_is_valid(cursor_i));
- if (aws_add_size_checked(total_len, cursor_i->len, &total_len)) {
- return AWS_OP_ERR;
- }
- }
- va_end(args);
-
- if (aws_byte_buf_init(dest, allocator, total_len)) {
- return AWS_OP_ERR;
- }
-
- va_start(args, allocator);
- while ((cursor_i = va_arg(args, struct aws_byte_cursor *)) != NULL) {
- /* Impossible for this call to fail, we pre-allocated sufficient space */
- aws_byte_buf_append_and_update(dest, cursor_i);
- }
- va_end(args);
-
- return AWS_OP_SUCCESS;
-}
-
+int aws_byte_buf_init_cache_and_update_cursors(struct aws_byte_buf *dest, struct aws_allocator *allocator, ...) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(dest);
+
+ AWS_ZERO_STRUCT(*dest);
+
+ size_t total_len = 0;
+ va_list args;
+ va_start(args, allocator);
+
+ /* Loop until final NULL arg is encountered */
+ struct aws_byte_cursor *cursor_i;
+ while ((cursor_i = va_arg(args, struct aws_byte_cursor *)) != NULL) {
+ AWS_ASSERT(aws_byte_cursor_is_valid(cursor_i));
+ if (aws_add_size_checked(total_len, cursor_i->len, &total_len)) {
+ return AWS_OP_ERR;
+ }
+ }
+ va_end(args);
+
+ if (aws_byte_buf_init(dest, allocator, total_len)) {
+ return AWS_OP_ERR;
+ }
+
+ va_start(args, allocator);
+ while ((cursor_i = va_arg(args, struct aws_byte_cursor *)) != NULL) {
+ /* Impossible for this call to fail, we pre-allocated sufficient space */
+ aws_byte_buf_append_and_update(dest, cursor_i);
+ }
+ va_end(args);
+
+ return AWS_OP_SUCCESS;
+}
+
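The NULL-terminated varargs convention above is easy to get wrong, so a short usage sketch may help; `allocator`, `part_a`, and `part_b` are assumed to exist, and on success both cursors are re-pointed at their copies inside the new buffer (via aws_byte_buf_append_and_update):

    struct aws_byte_buf cache;
    if (aws_byte_buf_init_cache_and_update_cursors(&cache, allocator, &part_a, &part_b, NULL /* terminator */)) {
        return AWS_OP_ERR; /* overflow or allocation failure */
    }
    /* ... part_a and part_b now alias memory owned by `cache` ... */
    aws_byte_buf_clean_up(&cache);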
bool aws_byte_cursor_next_split(
const struct aws_byte_cursor *AWS_RESTRICT input_str,
char split_on,
struct aws_byte_cursor *AWS_RESTRICT substr) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(input_str));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(input_str));
- /* If substr is zeroed-out, then this is the first run. */
- const bool first_run = substr->ptr == NULL;
+ /* If substr is zeroed-out, then this is the first run. */
+ const bool first_run = substr->ptr == NULL;
- /* It's legal for input_str to be zeroed out: {.ptr=NULL, .len=0}
- * Deal with this case separately */
- if (AWS_UNLIKELY(input_str->ptr == NULL)) {
- if (first_run) {
- /* Set substr->ptr to something non-NULL so that next split() call doesn't look like the first run */
- substr->ptr = (void *)"";
- substr->len = 0;
- return true;
- }
+ /* It's legal for input_str to be zeroed out: {.ptr=NULL, .len=0}
+ * Deal with this case separately */
+ if (AWS_UNLIKELY(input_str->ptr == NULL)) {
+ if (first_run) {
+ /* Set substr->ptr to something non-NULL so that next split() call doesn't look like the first run */
+ substr->ptr = (void *)"";
+ substr->len = 0;
+ return true;
+ }
- /* done */
+ /* done */
AWS_ZERO_STRUCT(*substr);
return false;
}
- /* Rest of function deals with non-NULL input_str->ptr */
-
- if (first_run) {
- *substr = *input_str;
- } else {
- /* This is not the first run.
- * Advance substr past the previous split. */
- const uint8_t *input_end = input_str->ptr + input_str->len;
- substr->ptr += substr->len + 1;
-
- /* Note that it's ok if substr->ptr == input_end, this happens in the
- * final valid split of an input_str that ends with the split_on character:
- * Ex: "AB&" split on '&' produces "AB" and "" */
- if (substr->ptr > input_end || substr->ptr < input_str->ptr) { /* 2nd check is overflow check */
- /* done */
- AWS_ZERO_STRUCT(*substr);
- return false;
+ /* Rest of function deals with non-NULL input_str->ptr */
+
+ if (first_run) {
+ *substr = *input_str;
+ } else {
+ /* This is not the first run.
+ * Advance substr past the previous split. */
+ const uint8_t *input_end = input_str->ptr + input_str->len;
+ substr->ptr += substr->len + 1;
+
+ /* Note that it's ok if substr->ptr == input_end, this happens in the
+ * final valid split of an input_str that ends with the split_on character:
+ * Ex: "AB&" split on '&' produces "AB" and "" */
+ if (substr->ptr > input_end || substr->ptr < input_str->ptr) { /* 2nd check is overflow check */
+ /* done */
+ AWS_ZERO_STRUCT(*substr);
+ return false;
}
-
- /* update len to be remainder of the string */
- substr->len = input_str->len - (substr->ptr - input_str->ptr);
+
+ /* update len to be remainder of the string */
+ substr->len = input_str->len - (substr->ptr - input_str->ptr);
}
- /* substr is now remainder of string, search for next split */
+ /* substr is now remainder of string, search for next split */
uint8_t *new_location = memchr(substr->ptr, split_on, substr->len);
if (new_location) {
@@ -249,7 +249,7 @@ bool aws_byte_cursor_next_split(
substr->len = new_location - substr->ptr;
}
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(substr));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(substr));
return true;
}
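The first-run and advance logic above exists to support the usual iteration pattern, where a zero-initialized substr marks the first call; a minimal sketch (the printing helper is illustrative):

    #include <stdio.h>

    static void s_print_fields(const struct aws_byte_cursor *input) {
        struct aws_byte_cursor field = {0}; /* zeroed cursor => first run */
        while (aws_byte_cursor_next_split(input, ',', &field)) {
            printf("field: %.*s\n", (int)field.len, (const char *)field.ptr);
        }
    }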
@@ -258,7 +258,7 @@ int aws_byte_cursor_split_on_char_n(
char split_on,
size_t n,
struct aws_array_list *AWS_RESTRICT output) {
- AWS_ASSERT(aws_byte_cursor_is_valid(input_str));
+ AWS_ASSERT(aws_byte_cursor_is_valid(input_str));
AWS_ASSERT(output);
AWS_ASSERT(output->item_size >= sizeof(struct aws_byte_cursor));
@@ -293,46 +293,46 @@ int aws_byte_cursor_split_on_char(
return aws_byte_cursor_split_on_char_n(input_str, split_on, 0, output);
}
-int aws_byte_cursor_find_exact(
- const struct aws_byte_cursor *AWS_RESTRICT input_str,
- const struct aws_byte_cursor *AWS_RESTRICT to_find,
- struct aws_byte_cursor *first_find) {
- if (to_find->len > input_str->len) {
- return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND);
- }
-
- if (to_find->len < 1) {
- return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
- }
-
- struct aws_byte_cursor working_cur = *input_str;
-
- while (working_cur.len) {
- uint8_t *first_char_location = memchr(working_cur.ptr, (char)*to_find->ptr, working_cur.len);
-
- if (!first_char_location) {
- return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND);
- }
-
- aws_byte_cursor_advance(&working_cur, first_char_location - working_cur.ptr);
-
- if (working_cur.len < to_find->len) {
- return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND);
- }
-
- if (!memcmp(working_cur.ptr, to_find->ptr, to_find->len)) {
- *first_find = working_cur;
- return AWS_OP_SUCCESS;
- }
-
- aws_byte_cursor_advance(&working_cur, 1);
- }
-
- return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND);
-}
-
+int aws_byte_cursor_find_exact(
+ const struct aws_byte_cursor *AWS_RESTRICT input_str,
+ const struct aws_byte_cursor *AWS_RESTRICT to_find,
+ struct aws_byte_cursor *first_find) {
+ if (to_find->len > input_str->len) {
+ return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND);
+ }
+
+ if (to_find->len < 1) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ struct aws_byte_cursor working_cur = *input_str;
+
+ while (working_cur.len) {
+ uint8_t *first_char_location = memchr(working_cur.ptr, (char)*to_find->ptr, working_cur.len);
+
+ if (!first_char_location) {
+ return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND);
+ }
+
+ aws_byte_cursor_advance(&working_cur, first_char_location - working_cur.ptr);
+
+ if (working_cur.len < to_find->len) {
+ return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND);
+ }
+
+ if (!memcmp(working_cur.ptr, to_find->ptr, to_find->len)) {
+ *first_find = working_cur;
+ return AWS_OP_SUCCESS;
+ }
+
+ aws_byte_cursor_advance(&working_cur, 1);
+ }
+
+ return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND);
+}
+
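On success, aws_byte_cursor_find_exact leaves first_find starting at the first occurrence and running to the end of the haystack; a brief sketch (hedged: `haystack` and `needle` are assumed to be valid cursors, e.g. built with aws_byte_cursor_from_c_str):

    struct aws_byte_cursor match;
    if (aws_byte_cursor_find_exact(&haystack, &needle, &match) == AWS_OP_SUCCESS) {
        size_t offset = (size_t)(match.ptr - haystack.ptr); /* byte offset of the first match */
        (void)offset; /* match.len spans from the match to the end of haystack */
    }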
int aws_byte_buf_cat(struct aws_byte_buf *dest, size_t number_of_args, ...) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
+ AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
va_list ap;
va_start(ap, number_of_args);
@@ -343,36 +343,36 @@ int aws_byte_buf_cat(struct aws_byte_buf *dest, size_t number_of_args, ...) {
if (aws_byte_buf_append(dest, &cursor)) {
va_end(ap);
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
return AWS_OP_ERR;
}
}
va_end(ap);
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
return AWS_OP_SUCCESS;
}
bool aws_byte_cursor_eq(const struct aws_byte_cursor *a, const struct aws_byte_cursor *b) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(a));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(b));
- bool rv = aws_array_eq(a->ptr, a->len, b->ptr, b->len);
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(a));
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(b));
- return rv;
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(a));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(b));
+ bool rv = aws_array_eq(a->ptr, a->len, b->ptr, b->len);
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(a));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(b));
+ return rv;
}
bool aws_byte_cursor_eq_ignore_case(const struct aws_byte_cursor *a, const struct aws_byte_cursor *b) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(a));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(b));
- bool rv = aws_array_eq_ignore_case(a->ptr, a->len, b->ptr, b->len);
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(a));
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(b));
- return rv;
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(a));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(b));
+ bool rv = aws_array_eq_ignore_case(a->ptr, a->len, b->ptr, b->len);
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(a));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(b));
+ return rv;
}
/* Every possible uint8_t value, lowercased */
-static const uint8_t s_tolower_table[] = {
+static const uint8_t s_tolower_table[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 'a',
@@ -385,21 +385,21 @@ static const uint8_t s_tolower_table[] = {
198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255};
-AWS_STATIC_ASSERT(AWS_ARRAY_SIZE(s_tolower_table) == 256);
+AWS_STATIC_ASSERT(AWS_ARRAY_SIZE(s_tolower_table) == 256);
const uint8_t *aws_lookup_table_to_lower_get(void) {
return s_tolower_table;
}
-bool aws_array_eq_ignore_case(
- const void *const array_a,
- const size_t len_a,
- const void *const array_b,
- const size_t len_b) {
- AWS_PRECONDITION(
- (len_a == 0) || AWS_MEM_IS_READABLE(array_a, len_a), "Input array [array_a] must be readable up to [len_a].");
- AWS_PRECONDITION(
- (len_b == 0) || AWS_MEM_IS_READABLE(array_b, len_b), "Input array [array_b] must be readable up to [len_b].");
+bool aws_array_eq_ignore_case(
+ const void *const array_a,
+ const size_t len_a,
+ const void *const array_b,
+ const size_t len_b) {
+ AWS_PRECONDITION(
+ (len_a == 0) || AWS_MEM_IS_READABLE(array_a, len_a), "Input array [array_a] must be readable up to [len_a].");
+ AWS_PRECONDITION(
+ (len_b == 0) || AWS_MEM_IS_READABLE(array_b, len_b), "Input array [array_b] must be readable up to [len_b].");
if (len_a != len_b) {
return false;
@@ -416,11 +416,11 @@ bool aws_array_eq_ignore_case(
return true;
}
-bool aws_array_eq(const void *const array_a, const size_t len_a, const void *const array_b, const size_t len_b) {
- AWS_PRECONDITION(
- (len_a == 0) || AWS_MEM_IS_READABLE(array_a, len_a), "Input array [array_a] must be readable up to [len_a].");
- AWS_PRECONDITION(
- (len_b == 0) || AWS_MEM_IS_READABLE(array_b, len_b), "Input array [array_b] must be readable up to [len_b].");
+bool aws_array_eq(const void *const array_a, const size_t len_a, const void *const array_b, const size_t len_b) {
+ AWS_PRECONDITION(
+ (len_a == 0) || AWS_MEM_IS_READABLE(array_a, len_a), "Input array [array_a] must be readable up to [len_a].");
+ AWS_PRECONDITION(
+ (len_b == 0) || AWS_MEM_IS_READABLE(array_b, len_b), "Input array [array_b] must be readable up to [len_b].");
if (len_a != len_b) {
return false;
@@ -433,11 +433,11 @@ bool aws_array_eq(const void *const array_a, const size_t len_a, const void *con
return !memcmp(array_a, array_b, len_a);
}
-bool aws_array_eq_c_str_ignore_case(const void *const array, const size_t array_len, const char *const c_str) {
- AWS_PRECONDITION(
- array || (array_len == 0),
- "Either input pointer [array_a] mustn't be NULL or input [array_len] mustn't be zero.");
- AWS_PRECONDITION(c_str != NULL);
+bool aws_array_eq_c_str_ignore_case(const void *const array, const size_t array_len, const char *const c_str) {
+ AWS_PRECONDITION(
+ array || (array_len == 0),
+ "Either input pointer [array_a] mustn't be NULL or input [array_len] mustn't be zero.");
+ AWS_PRECONDITION(c_str != NULL);
/* Simpler implementation could have been:
* return aws_array_eq_ignore_case(array, array_len, c_str, strlen(c_str));
@@ -461,11 +461,11 @@ bool aws_array_eq_c_str_ignore_case(const void *const array, const size_t array_
return str_bytes[array_len] == '\0';
}
-bool aws_array_eq_c_str(const void *const array, const size_t array_len, const char *const c_str) {
- AWS_PRECONDITION(
- array || (array_len == 0),
- "Either input pointer [array_a] mustn't be NULL or input [array_len] mustn't be zero.");
- AWS_PRECONDITION(c_str != NULL);
+bool aws_array_eq_c_str(const void *const array, const size_t array_len, const char *const c_str) {
+ AWS_PRECONDITION(
+ array || (array_len == 0),
+ "Either input pointer [array_a] mustn't be NULL or input [array_len] mustn't be zero.");
+ AWS_PRECONDITION(c_str != NULL);
/* Simpler implementation could have been:
* return aws_array_eq(array, array_len, c_str, strlen(c_str));
@@ -489,8 +489,8 @@ bool aws_array_eq_c_str(const void *const array, const size_t array_len, const c
return str_bytes[array_len] == '\0';
}
-uint64_t aws_hash_array_ignore_case(const void *array, const size_t len) {
- AWS_PRECONDITION(AWS_MEM_IS_READABLE(array, len));
+uint64_t aws_hash_array_ignore_case(const void *array, const size_t len) {
+ AWS_PRECONDITION(AWS_MEM_IS_READABLE(array, len));
/* FNV-1a: https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function */
const uint64_t fnv_offset_basis = 0xcbf29ce484222325ULL;
const uint64_t fnv_prime = 0x100000001b3ULL;
@@ -502,60 +502,60 @@ uint64_t aws_hash_array_ignore_case(const void *array, const size_t len) {
while (i != end) {
const uint8_t lower = s_tolower_table[*i++];
hash ^= lower;
-#ifdef CBMC
-# pragma CPROVER check push
-# pragma CPROVER check disable "unsigned-overflow"
-#endif
+#ifdef CBMC
+# pragma CPROVER check push
+# pragma CPROVER check disable "unsigned-overflow"
+#endif
hash *= fnv_prime;
-#ifdef CBMC
-# pragma CPROVER check pop
-#endif
+#ifdef CBMC
+# pragma CPROVER check pop
+#endif
}
return hash;
}
uint64_t aws_hash_byte_cursor_ptr_ignore_case(const void *item) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(item));
- const struct aws_byte_cursor *const cursor = item;
- uint64_t rval = aws_hash_array_ignore_case(cursor->ptr, cursor->len);
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(item));
- return rval;
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(item));
+ const struct aws_byte_cursor *const cursor = item;
+ uint64_t rval = aws_hash_array_ignore_case(cursor->ptr, cursor->len);
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(item));
+ return rval;
}
-bool aws_byte_cursor_eq_byte_buf(const struct aws_byte_cursor *const a, const struct aws_byte_buf *const b) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(a));
- AWS_PRECONDITION(aws_byte_buf_is_valid(b));
- bool rv = aws_array_eq(a->ptr, a->len, b->buffer, b->len);
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(a));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(b));
- return rv;
+bool aws_byte_cursor_eq_byte_buf(const struct aws_byte_cursor *const a, const struct aws_byte_buf *const b) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(a));
+ AWS_PRECONDITION(aws_byte_buf_is_valid(b));
+ bool rv = aws_array_eq(a->ptr, a->len, b->buffer, b->len);
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(a));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(b));
+ return rv;
}
-bool aws_byte_cursor_eq_byte_buf_ignore_case(
- const struct aws_byte_cursor *const a,
- const struct aws_byte_buf *const b) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(a));
- AWS_PRECONDITION(aws_byte_buf_is_valid(b));
- bool rv = aws_array_eq_ignore_case(a->ptr, a->len, b->buffer, b->len);
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(a));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(b));
- return rv;
+bool aws_byte_cursor_eq_byte_buf_ignore_case(
+ const struct aws_byte_cursor *const a,
+ const struct aws_byte_buf *const b) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(a));
+ AWS_PRECONDITION(aws_byte_buf_is_valid(b));
+ bool rv = aws_array_eq_ignore_case(a->ptr, a->len, b->buffer, b->len);
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(a));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(b));
+ return rv;
}
-bool aws_byte_cursor_eq_c_str(const struct aws_byte_cursor *const cursor, const char *const c_str) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor));
- AWS_PRECONDITION(c_str != NULL);
- bool rv = aws_array_eq_c_str(cursor->ptr, cursor->len, c_str);
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor));
- return rv;
+bool aws_byte_cursor_eq_c_str(const struct aws_byte_cursor *const cursor, const char *const c_str) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor));
+ AWS_PRECONDITION(c_str != NULL);
+ bool rv = aws_array_eq_c_str(cursor->ptr, cursor->len, c_str);
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor));
+ return rv;
}
-bool aws_byte_cursor_eq_c_str_ignore_case(const struct aws_byte_cursor *const cursor, const char *const c_str) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor));
- AWS_PRECONDITION(c_str != NULL);
- bool rv = aws_array_eq_c_str_ignore_case(cursor->ptr, cursor->len, c_str);
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor));
- return rv;
+bool aws_byte_cursor_eq_c_str_ignore_case(const struct aws_byte_cursor *const cursor, const char *const c_str) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor));
+ AWS_PRECONDITION(c_str != NULL);
+ bool rv = aws_array_eq_c_str_ignore_case(cursor->ptr, cursor->len, c_str);
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor));
+ return rv;
}
int aws_byte_buf_append(struct aws_byte_buf *to, const struct aws_byte_cursor *from) {
@@ -587,8 +587,8 @@ int aws_byte_buf_append_with_lookup(
const uint8_t *lookup_table) {
AWS_PRECONDITION(aws_byte_buf_is_valid(to));
AWS_PRECONDITION(aws_byte_cursor_is_valid(from));
- AWS_PRECONDITION(
- AWS_MEM_IS_READABLE(lookup_table, 256), "Input array [lookup_table] must be at least 256 bytes long.");
+ AWS_PRECONDITION(
+ AWS_MEM_IS_READABLE(lookup_table, 256), "Input array [lookup_table] must be at least 256 bytes long.");
if (to->capacity - to->len < from->len) {
AWS_POSTCONDITION(aws_byte_buf_is_valid(to));
@@ -609,13 +609,13 @@ int aws_byte_buf_append_with_lookup(
return AWS_OP_SUCCESS;
}
-static int s_aws_byte_buf_append_dynamic(
- struct aws_byte_buf *to,
- const struct aws_byte_cursor *from,
- bool clear_released_memory) {
+static int s_aws_byte_buf_append_dynamic(
+ struct aws_byte_buf *to,
+ const struct aws_byte_cursor *from,
+ bool clear_released_memory) {
AWS_PRECONDITION(aws_byte_buf_is_valid(to));
AWS_PRECONDITION(aws_byte_cursor_is_valid(from));
- AWS_ERROR_PRECONDITION(to->allocator);
+ AWS_ERROR_PRECONDITION(to->allocator);
if (to->capacity - to->len < from->len) {
/*
@@ -679,11 +679,11 @@ static int s_aws_byte_buf_append_dynamic(
if (from->len > 0) {
memcpy(new_buffer + to->len, from->ptr, from->len);
}
-
- if (clear_released_memory) {
- aws_secure_zero(to->buffer, to->capacity);
- }
-
+
+ if (clear_released_memory) {
+ aws_secure_zero(to->buffer, to->capacity);
+ }
+
/*
* Get rid of the old buffer
*/
@@ -710,41 +710,41 @@ static int s_aws_byte_buf_append_dynamic(
return AWS_OP_SUCCESS;
}
-int aws_byte_buf_append_dynamic(struct aws_byte_buf *to, const struct aws_byte_cursor *from) {
- return s_aws_byte_buf_append_dynamic(to, from, false);
-}
-
-int aws_byte_buf_append_dynamic_secure(struct aws_byte_buf *to, const struct aws_byte_cursor *from) {
- return s_aws_byte_buf_append_dynamic(to, from, true);
-}
-
-static int s_aws_byte_buf_append_byte_dynamic(struct aws_byte_buf *buffer, uint8_t value, bool clear_released_memory) {
-#if defined(_MSC_VER)
-# pragma warning(push)
-# pragma warning(disable : 4221)
-#endif /* _MSC_VER */
-
- /* msvc isn't a fan of this pointer-to-local assignment */
- struct aws_byte_cursor eq_cursor = {.len = 1, .ptr = &value};
-
-#if defined(_MSC_VER)
-# pragma warning(pop)
-#endif /* _MSC_VER */
-
- return s_aws_byte_buf_append_dynamic(buffer, &eq_cursor, clear_released_memory);
-}
-
-int aws_byte_buf_append_byte_dynamic(struct aws_byte_buf *buffer, uint8_t value) {
- return s_aws_byte_buf_append_byte_dynamic(buffer, value, false);
-}
-
-int aws_byte_buf_append_byte_dynamic_secure(struct aws_byte_buf *buffer, uint8_t value) {
- return s_aws_byte_buf_append_byte_dynamic(buffer, value, true);
-}
-
+int aws_byte_buf_append_dynamic(struct aws_byte_buf *to, const struct aws_byte_cursor *from) {
+ return s_aws_byte_buf_append_dynamic(to, from, false);
+}
+
+int aws_byte_buf_append_dynamic_secure(struct aws_byte_buf *to, const struct aws_byte_cursor *from) {
+ return s_aws_byte_buf_append_dynamic(to, from, true);
+}
+
+static int s_aws_byte_buf_append_byte_dynamic(struct aws_byte_buf *buffer, uint8_t value, bool clear_released_memory) {
+#if defined(_MSC_VER)
+# pragma warning(push)
+# pragma warning(disable : 4221)
+#endif /* _MSC_VER */
+
+ /* msvc isn't a fan of this pointer-to-local assignment */
+ struct aws_byte_cursor eq_cursor = {.len = 1, .ptr = &value};
+
+#if defined(_MSC_VER)
+# pragma warning(pop)
+#endif /* _MSC_VER */
+
+ return s_aws_byte_buf_append_dynamic(buffer, &eq_cursor, clear_released_memory);
+}
+
+int aws_byte_buf_append_byte_dynamic(struct aws_byte_buf *buffer, uint8_t value) {
+ return s_aws_byte_buf_append_byte_dynamic(buffer, value, false);
+}
+
+int aws_byte_buf_append_byte_dynamic_secure(struct aws_byte_buf *buffer, uint8_t value) {
+ return s_aws_byte_buf_append_byte_dynamic(buffer, value, true);
+}
+
int aws_byte_buf_reserve(struct aws_byte_buf *buffer, size_t requested_capacity) {
- AWS_ERROR_PRECONDITION(buffer->allocator);
- AWS_ERROR_PRECONDITION(aws_byte_buf_is_valid(buffer));
+ AWS_ERROR_PRECONDITION(buffer->allocator);
+ AWS_ERROR_PRECONDITION(aws_byte_buf_is_valid(buffer));
if (requested_capacity <= buffer->capacity) {
AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer));
@@ -762,8 +762,8 @@ int aws_byte_buf_reserve(struct aws_byte_buf *buffer, size_t requested_capacity)
}
int aws_byte_buf_reserve_relative(struct aws_byte_buf *buffer, size_t additional_length) {
- AWS_ERROR_PRECONDITION(buffer->allocator);
- AWS_ERROR_PRECONDITION(aws_byte_buf_is_valid(buffer));
+ AWS_ERROR_PRECONDITION(buffer->allocator);
+ AWS_ERROR_PRECONDITION(aws_byte_buf_is_valid(buffer));
size_t requested_capacity = 0;
if (AWS_UNLIKELY(aws_add_size_checked(buffer->len, additional_length, &requested_capacity))) {
@@ -777,852 +777,852 @@ int aws_byte_buf_reserve_relative(struct aws_byte_buf *buffer, size_t additional
struct aws_byte_cursor aws_byte_cursor_right_trim_pred(
const struct aws_byte_cursor *source,
aws_byte_predicate_fn *predicate) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(source));
- AWS_PRECONDITION(predicate != NULL);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(source));
+ AWS_PRECONDITION(predicate != NULL);
struct aws_byte_cursor trimmed = *source;
while (trimmed.len > 0 && predicate(*(trimmed.ptr + trimmed.len - 1))) {
--trimmed.len;
}
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(source));
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(&trimmed));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(source));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(&trimmed));
return trimmed;
}
struct aws_byte_cursor aws_byte_cursor_left_trim_pred(
const struct aws_byte_cursor *source,
aws_byte_predicate_fn *predicate) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(source));
- AWS_PRECONDITION(predicate != NULL);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(source));
+ AWS_PRECONDITION(predicate != NULL);
struct aws_byte_cursor trimmed = *source;
while (trimmed.len > 0 && predicate(*(trimmed.ptr))) {
--trimmed.len;
++trimmed.ptr;
}
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(source));
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(&trimmed));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(source));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(&trimmed));
return trimmed;
}
struct aws_byte_cursor aws_byte_cursor_trim_pred(
const struct aws_byte_cursor *source,
aws_byte_predicate_fn *predicate) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(source));
- AWS_PRECONDITION(predicate != NULL);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(source));
+ AWS_PRECONDITION(predicate != NULL);
struct aws_byte_cursor left_trimmed = aws_byte_cursor_left_trim_pred(source, predicate);
- struct aws_byte_cursor dest = aws_byte_cursor_right_trim_pred(&left_trimmed, predicate);
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(source));
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(&dest));
- return dest;
+ struct aws_byte_cursor dest = aws_byte_cursor_right_trim_pred(&left_trimmed, predicate);
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(source));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(&dest));
+ return dest;
}
bool aws_byte_cursor_satisfies_pred(const struct aws_byte_cursor *source, aws_byte_predicate_fn *predicate) {
struct aws_byte_cursor trimmed = aws_byte_cursor_left_trim_pred(source, predicate);
- bool rval = (trimmed.len == 0);
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(source));
- return rval;
-}
-
-int aws_byte_cursor_compare_lexical(const struct aws_byte_cursor *lhs, const struct aws_byte_cursor *rhs) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(lhs));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(rhs));
- /* make sure we don't pass NULL pointers to memcmp */
- AWS_PRECONDITION(lhs->ptr != NULL);
- AWS_PRECONDITION(rhs->ptr != NULL);
- size_t comparison_length = lhs->len;
- if (comparison_length > rhs->len) {
- comparison_length = rhs->len;
- }
-
- int result = memcmp(lhs->ptr, rhs->ptr, comparison_length);
-
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(lhs));
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(rhs));
- if (result != 0) {
- return result;
- }
-
- if (lhs->len != rhs->len) {
- return comparison_length == lhs->len ? -1 : 1;
- }
-
- return 0;
-}
-
-int aws_byte_cursor_compare_lookup(
- const struct aws_byte_cursor *lhs,
- const struct aws_byte_cursor *rhs,
- const uint8_t *lookup_table) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(lhs));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(rhs));
- AWS_PRECONDITION(AWS_MEM_IS_READABLE(lookup_table, 256));
- const uint8_t *lhs_curr = lhs->ptr;
- const uint8_t *lhs_end = lhs_curr + lhs->len;
-
- const uint8_t *rhs_curr = rhs->ptr;
- const uint8_t *rhs_end = rhs_curr + rhs->len;
-
- while (lhs_curr < lhs_end && rhs_curr < rhs_end) {
- uint8_t lhc = lookup_table[*lhs_curr];
- uint8_t rhc = lookup_table[*rhs_curr];
-
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(lhs));
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(rhs));
- if (lhc < rhc) {
- return -1;
- }
-
- if (lhc > rhc) {
- return 1;
- }
-
- lhs_curr++;
- rhs_curr++;
- }
-
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(lhs));
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(rhs));
- if (lhs_curr < lhs_end) {
- return 1;
- }
-
- if (rhs_curr < rhs_end) {
- return -1;
- }
-
- return 0;
-}
-
-/**
- * For creating a byte buffer from a null-terminated string literal.
- */
-struct aws_byte_buf aws_byte_buf_from_c_str(const char *c_str) {
- struct aws_byte_buf buf;
- buf.len = (!c_str) ? 0 : strlen(c_str);
- buf.capacity = buf.len;
- buf.buffer = (buf.capacity == 0) ? NULL : (uint8_t *)c_str;
- buf.allocator = NULL;
- AWS_POSTCONDITION(aws_byte_buf_is_valid(&buf));
- return buf;
-}
-
-struct aws_byte_buf aws_byte_buf_from_array(const void *bytes, size_t len) {
- AWS_PRECONDITION(AWS_MEM_IS_WRITABLE(bytes, len), "Input array [bytes] must be writable up to [len] bytes.");
- struct aws_byte_buf buf;
- buf.buffer = (len > 0) ? (uint8_t *)bytes : NULL;
- buf.len = len;
- buf.capacity = len;
- buf.allocator = NULL;
- AWS_POSTCONDITION(aws_byte_buf_is_valid(&buf));
- return buf;
-}
-
-struct aws_byte_buf aws_byte_buf_from_empty_array(const void *bytes, size_t capacity) {
- AWS_PRECONDITION(
- AWS_MEM_IS_WRITABLE(bytes, capacity), "Input array [bytes] must be writable up to [capacity] bytes.");
- struct aws_byte_buf buf;
- buf.buffer = (capacity > 0) ? (uint8_t *)bytes : NULL;
- buf.len = 0;
- buf.capacity = capacity;
- buf.allocator = NULL;
- AWS_POSTCONDITION(aws_byte_buf_is_valid(&buf));
- return buf;
-}
-
-struct aws_byte_cursor aws_byte_cursor_from_buf(const struct aws_byte_buf *const buf) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- struct aws_byte_cursor cur;
- cur.ptr = buf->buffer;
- cur.len = buf->len;
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(&cur));
- return cur;
-}
-
-struct aws_byte_cursor aws_byte_cursor_from_c_str(const char *c_str) {
- struct aws_byte_cursor cur;
- cur.ptr = (uint8_t *)c_str;
- cur.len = (cur.ptr) ? strlen(c_str) : 0;
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(&cur));
- return cur;
-}
-
-struct aws_byte_cursor aws_byte_cursor_from_array(const void *const bytes, const size_t len) {
- AWS_PRECONDITION(len == 0 || AWS_MEM_IS_READABLE(bytes, len), "Input array [bytes] must be readable up to [len].");
- struct aws_byte_cursor cur;
- cur.ptr = (uint8_t *)bytes;
- cur.len = len;
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(&cur));
- return cur;
-}
-
-#ifdef CBMC
-# pragma CPROVER check push
-# pragma CPROVER check disable "unsigned-overflow"
-#endif
-/**
- * If index >= bound, bound > (SIZE_MAX / 2), or index > (SIZE_MAX / 2), returns
- * 0. Otherwise, returns UINTPTR_MAX. This function is designed to return the correct
- * value even under CPU speculation conditions, and is intended to be used for
- * SPECTRE mitigation purposes.
- */
-size_t aws_nospec_mask(size_t index, size_t bound) {
- /*
-     * SPECTRE mitigation - we compute a mask that will be zero if the index is
-     * out of bounds (index >= bound), and all-ones otherwise, and AND it into the index.
- * It is critical that we avoid any branches in this logic.
- */
-
- /*
- * Hide the index value from the optimizer. This helps ensure that all this
- * logic doesn't get eliminated.
- */
-#if defined(__GNUC__) || defined(__clang__)
- __asm__ __volatile__("" : "+r"(index));
-#endif
-#if defined(_MSVC_LANG)
- /*
- * MSVC doesn't have a good way for us to blind the optimizer, and doesn't
- * even have inline asm on x64. Some experimentation indicates that this
- * hack seems to confuse it sufficiently for our needs.
- */
- *((volatile uint8_t *)&index) += 0;
-#endif
-
- /*
- * If len > (SIZE_MAX / 2), then we can end up with len - buf->len being
- * positive simply because the sign bit got inverted away. So we also check
- * that the sign bit isn't set from the start.
- *
- * We also check that bound <= (SIZE_MAX / 2) to catch cases where the
- * buffer is _already_ out of bounds.
- */
- size_t negative_mask = index | bound;
- size_t toobig_mask = bound - index - (uintptr_t)1;
- size_t combined_mask = negative_mask | toobig_mask;
-
- /*
- * combined_mask needs to have its sign bit OFF for us to be in range.
- * We'd like to expand this to a mask we can AND into our index, so flip
- * that bit (and everything else), shift it over so it's the only bit in the
- * ones position, and multiply across the entire register.
- *
- * First, extract the (inverse) top bit and move it to the lowest bit.
- * Because there's no standard SIZE_BIT in C99, we'll divide by a mask with
- * just the top bit set instead.
- */
-
- combined_mask = (~combined_mask) / (SIZE_MAX - (SIZE_MAX >> 1));
-
- /*
- * Now multiply it to replicate it across all bits.
- *
- * Note that GCC is smart enough to optimize the divide-and-multiply into
- * an arithmetic right shift operation on x86.
- */
- combined_mask = combined_mask * UINTPTR_MAX;
-
- return combined_mask;
-}
-#ifdef CBMC
-# pragma CPROVER check pop
-#endif
-
-/**
- * Tests if the given aws_byte_cursor has at least len bytes remaining. If so,
- * *buf is advanced by len bytes (incrementing ->ptr and decrementing ->len),
- * and an aws_byte_cursor referring to the first len bytes of the original *buf
- * is returned. Otherwise, an aws_byte_cursor with ->ptr = NULL, ->len = 0 is
- * returned.
- *
- * Note that if len is above (SIZE_MAX / 2), this function will also treat it as
- * a buffer overflow, and return NULL without changing *buf.
- */
-struct aws_byte_cursor aws_byte_cursor_advance(struct aws_byte_cursor *const cursor, const size_t len) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor));
- struct aws_byte_cursor rv;
- if (cursor->len > (SIZE_MAX >> 1) || len > (SIZE_MAX >> 1) || len > cursor->len) {
- rv.ptr = NULL;
- rv.len = 0;
- } else {
- rv.ptr = cursor->ptr;
- rv.len = len;
-
- cursor->ptr += len;
- cursor->len -= len;
- }
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor));
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(&rv));
- return rv;
-}
-
-/**
- * Behaves identically to aws_byte_cursor_advance, but avoids speculative
- * execution potentially reading out-of-bounds pointers (by returning an
- * empty ptr in such speculated paths).
- *
- * This should generally be done when using an untrusted or
- * data-dependent value for 'len', to avoid speculating into a path where
- * cursor->ptr points outside the true ptr length.
- */
-
-struct aws_byte_cursor aws_byte_cursor_advance_nospec(struct aws_byte_cursor *const cursor, size_t len) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor));
-
- struct aws_byte_cursor rv;
-
- if (len <= cursor->len && len <= (SIZE_MAX >> 1) && cursor->len <= (SIZE_MAX >> 1)) {
- /*
- * If we're speculating past a failed bounds check, null out the pointer. This ensures
- * that we don't try to read past the end of the buffer and leak information about other
- * memory through timing side-channels.
- */
- uintptr_t mask = aws_nospec_mask(len, cursor->len + 1);
-
- /* Make sure we don't speculate-underflow len either */
- len = len & mask;
- cursor->ptr = (uint8_t *)((uintptr_t)cursor->ptr & mask);
- /* Make sure subsequent nospec accesses don't advance ptr past NULL */
- cursor->len = cursor->len & mask;
-
- rv.ptr = cursor->ptr;
- /* Make sure anything acting upon the returned cursor _also_ doesn't advance past NULL */
- rv.len = len & mask;
-
- cursor->ptr += len;
- cursor->len -= len;
- } else {
- rv.ptr = NULL;
- rv.len = 0;
- }
-
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor));
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(&rv));
- return rv;
-}
-
-/**
- * Reads specified length of data from byte cursor and copies it to the
- * destination array.
- *
- * On success, returns true and updates the cursor pointer/length accordingly.
- * If there is insufficient space in the cursor, returns false, leaving the
- * cursor unchanged.
- */
-bool aws_byte_cursor_read(struct aws_byte_cursor *AWS_RESTRICT cur, void *AWS_RESTRICT dest, const size_t len) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
- AWS_PRECONDITION(AWS_MEM_IS_WRITABLE(dest, len));
- if (len == 0) {
- return true;
- }
-
- struct aws_byte_cursor slice = aws_byte_cursor_advance_nospec(cur, len);
-
- if (slice.ptr) {
- memcpy(dest, slice.ptr, len);
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
- AWS_POSTCONDITION(AWS_MEM_IS_READABLE(dest, len));
- return true;
- }
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
- return false;
-}
-
-/**
- * Reads as many bytes from cursor as size of buffer, and copies them to buffer.
- *
- * On success, returns true and updates the cursor pointer/length accordingly.
- * If there is insufficient space in the cursor, returns false, leaving the
- * cursor unchanged.
- */
-bool aws_byte_cursor_read_and_fill_buffer(
- struct aws_byte_cursor *AWS_RESTRICT cur,
- struct aws_byte_buf *AWS_RESTRICT dest) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
- AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
- if (aws_byte_cursor_read(cur, dest->buffer, dest->capacity)) {
- dest->len = dest->capacity;
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return true;
- }
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return false;
-}
-
-/**
- * Reads a single byte from cursor, placing it in *var.
- *
- * On success, returns true and updates the cursor pointer/length accordingly.
- * If there is insufficient space in the cursor, returns false, leaving the
- * cursor unchanged.
- */
-bool aws_byte_cursor_read_u8(struct aws_byte_cursor *AWS_RESTRICT cur, uint8_t *AWS_RESTRICT var) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
- AWS_PRECONDITION(AWS_MEM_IS_WRITABLE(var, 1));
- bool rv = aws_byte_cursor_read(cur, var, 1);
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
- return rv;
-}
-
-/**
- * Reads a 16-bit value in network byte order from cur, and places it in host
- * byte order into var.
- *
- * On success, returns true and updates the cursor pointer/length accordingly.
- * If there is insufficient space in the cursor, returns false, leaving the
- * cursor unchanged.
- */
-bool aws_byte_cursor_read_be16(struct aws_byte_cursor *cur, uint16_t *var) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
- AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
- bool rv = aws_byte_cursor_read(cur, var, 2);
-
- if (AWS_LIKELY(rv)) {
- *var = aws_ntoh16(*var);
- }
-
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
- return rv;
-}
-
-/**
- * Reads an unsigned 24-bit value (3 bytes) in network byte order from cur,
- * and places it in host byte order into 32-bit var.
- * Ex: if cur's next 3 bytes are {0xAA, 0xBB, 0xCC}, then var becomes 0x00AABBCC.
- *
- * On success, returns true and updates the cursor pointer/length accordingly.
- * If there is insufficient space in the cursor, returns false, leaving the
- * cursor unchanged.
- */
-bool aws_byte_cursor_read_be24(struct aws_byte_cursor *cur, uint32_t *var) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
- AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
-
- uint8_t *var_bytes = (void *)var;
-
- /* read into "lower" 3 bytes */
- bool rv = aws_byte_cursor_read(cur, &var_bytes[1], 3);
-
- if (AWS_LIKELY(rv)) {
- /* zero out "highest" 4th byte*/
- var_bytes[0] = 0;
-
- *var = aws_ntoh32(*var);
- }
-
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
- return rv;
-}
-
-/**
- * Reads a 32-bit value in network byte order from cur, and places it in host
- * byte order into var.
- *
- * On success, returns true and updates the cursor pointer/length accordingly.
- * If there is insufficient space in the cursor, returns false, leaving the
- * cursor unchanged.
- */
-bool aws_byte_cursor_read_be32(struct aws_byte_cursor *cur, uint32_t *var) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
- AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
- bool rv = aws_byte_cursor_read(cur, var, 4);
-
- if (AWS_LIKELY(rv)) {
- *var = aws_ntoh32(*var);
- }
-
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
- return rv;
-}
-
-/**
- * Reads a 32-bit float in network byte order from cur, and places it in host
- * byte order into var.
- *
- * On success, returns true and updates the cursor pointer/length accordingly.
- * If there is insufficient space in the cursor, returns false, leaving the
- * cursor unchanged.
- */
-bool aws_byte_cursor_read_float_be32(struct aws_byte_cursor *cur, float *var) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
- AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
- bool rv = aws_byte_cursor_read(cur, var, sizeof(float));
-
- if (AWS_LIKELY(rv)) {
- *var = aws_ntohf32(*var);
- }
-
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
- return rv;
-}
-
-/**
- * Reads a 64-bit float in network byte order from cur, and places it in host
- * byte order into var.
- *
- * On success, returns true and updates the cursor pointer/length accordingly.
- * If there is insufficient space in the cursor, returns false, leaving the
- * cursor unchanged.
- */
-bool aws_byte_cursor_read_float_be64(struct aws_byte_cursor *cur, double *var) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
- AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
- bool rv = aws_byte_cursor_read(cur, var, sizeof(double));
-
- if (AWS_LIKELY(rv)) {
- *var = aws_ntohf64(*var);
- }
-
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
- return rv;
-}
-
-/**
- * Reads a 64-bit value in network byte order from cur, and places it in host
- * byte order into var.
- *
- * On success, returns true and updates the cursor pointer/length accordingly.
- * If there is insufficient space in the cursor, returns false, leaving the
- * cursor unchanged.
- */
-bool aws_byte_cursor_read_be64(struct aws_byte_cursor *cur, uint64_t *var) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
- AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
- bool rv = aws_byte_cursor_read(cur, var, sizeof(*var));
-
- if (AWS_LIKELY(rv)) {
- *var = aws_ntoh64(*var);
- }
-
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
- return rv;
-}
-
-/* Lookup from '0' -> 0, 'f' -> 0xf, 'F' -> 0xF, etc
- * invalid characters have value 255 */
-/* clang-format off */
-static const uint8_t s_hex_to_num_table[] = {
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255,
- /* 0 - 9 */
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
- 255, 255, 255, 255, 255, 255, 255,
- /* A - F */
- 0xA, 0xB, 0xC, 0xD, 0xE, 0xF,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255,
- /* a - f */
- 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-};
-AWS_STATIC_ASSERT(AWS_ARRAY_SIZE(s_hex_to_num_table) == 256);
-/* clang-format on */
-
-const uint8_t *aws_lookup_table_hex_to_num_get(void) {
- return s_hex_to_num_table;
-}
-
-bool aws_byte_cursor_read_hex_u8(struct aws_byte_cursor *cur, uint8_t *var) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
- AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
-
- bool success = false;
- if (AWS_LIKELY(cur->len >= 2)) {
- const uint8_t hi = s_hex_to_num_table[cur->ptr[0]];
- const uint8_t lo = s_hex_to_num_table[cur->ptr[1]];
-
- /* table maps invalid characters to 255 */
- if (AWS_LIKELY(hi != 255 && lo != 255)) {
- *var = (hi << 4) | lo;
- cur->ptr += 2;
- cur->len -= 2;
- success = true;
- }
- }
-
- AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
- return success;
-}
-
-/**
- * Appends a sub-buffer to the specified buffer.
- *
- * If the buffer has at least `len' bytes remaining (buffer->capacity - buffer->len >= len),
- * then buffer->len is incremented by len, and an aws_byte_buf is assigned to *output corresponding
- * to the last len bytes of the input buffer. The aws_byte_buf at *output will have a null
- * allocator, a zero initial length, and a capacity of 'len'. The function then returns true.
- *
- * If there is insufficient space, then this function nulls all fields in *output and returns
- * false.
- */
-bool aws_byte_buf_advance(
- struct aws_byte_buf *const AWS_RESTRICT buffer,
- struct aws_byte_buf *const AWS_RESTRICT output,
- const size_t len) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buffer));
- AWS_PRECONDITION(aws_byte_buf_is_valid(output));
- if (buffer->capacity - buffer->len >= len) {
- *output = aws_byte_buf_from_array(buffer->buffer + buffer->len, len);
- buffer->len += len;
- output->len = 0;
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(output));
- return true;
- } else {
- AWS_ZERO_STRUCT(*output);
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(output));
- return false;
- }
-}
-
-/**
- * Write specified number of bytes from array to byte buffer.
- *
- * On success, returns true and updates the buffer length accordingly.
- * If there is insufficient space in the buffer, returns false, leaving the
- * buffer unchanged.
- */
-bool aws_byte_buf_write(struct aws_byte_buf *AWS_RESTRICT buf, const uint8_t *AWS_RESTRICT src, size_t len) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- AWS_PRECONDITION(AWS_MEM_IS_READABLE(src, len), "Input array [src] must be readable up to [len] bytes.");
-
- if (len == 0) {
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
- return true;
- }
-
- if (buf->len > (SIZE_MAX >> 1) || len > (SIZE_MAX >> 1) || buf->len + len > buf->capacity) {
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
- return false;
- }
-
- memcpy(buf->buffer + buf->len, src, len);
- buf->len += len;
-
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
- return true;
-}
-
-/**
- * Copies all bytes from the source buffer to the destination buffer.
- *
- * On success, returns true and updates the destination buffer's length accordingly.
- * If there is insufficient space in the buffer, returns false, leaving the
- * buffer unchanged.
- */
-bool aws_byte_buf_write_from_whole_buffer(struct aws_byte_buf *AWS_RESTRICT buf, struct aws_byte_buf src) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- AWS_PRECONDITION(aws_byte_buf_is_valid(&src));
- return aws_byte_buf_write(buf, src.buffer, src.len);
-}
-
-/**
- * Copies all bytes from the cursor to the buffer.
- *
- * On success, returns true and updates the buffer's length accordingly.
- * If there is insufficient space in the buffer, returns false, leaving the
- * buffer unchanged.
- */
-bool aws_byte_buf_write_from_whole_cursor(struct aws_byte_buf *AWS_RESTRICT buf, struct aws_byte_cursor src) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&src));
- return aws_byte_buf_write(buf, src.ptr, src.len);
-}
-
-struct aws_byte_cursor aws_byte_buf_write_to_capacity(
- struct aws_byte_buf *buf,
- struct aws_byte_cursor *advancing_cursor) {
-
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(advancing_cursor));
-
- size_t available = buf->capacity - buf->len;
- size_t write_size = aws_min_size(available, advancing_cursor->len);
- struct aws_byte_cursor write_cursor = aws_byte_cursor_advance(advancing_cursor, write_size);
- aws_byte_buf_write_from_whole_cursor(buf, write_cursor);
- return write_cursor;
-}
-
-/**
- * Copies one byte to buffer.
- *
- * On success, returns true and updates the buffer length accordingly.
- * If there is insufficient space in the buffer, returns false, leaving the
- * buffer unchanged.
- */
-bool aws_byte_buf_write_u8(struct aws_byte_buf *AWS_RESTRICT buf, uint8_t c) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- return aws_byte_buf_write(buf, &c, 1);
-}
-
-/**
- * Writes one byte repeatedly to buffer (like memset)
- *
- * If there is insufficient space in the buffer, returns false, leaving the
- * buffer unchanged.
- */
-bool aws_byte_buf_write_u8_n(struct aws_byte_buf *buf, uint8_t c, size_t count) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
-
- if (buf->len > (SIZE_MAX >> 1) || count > (SIZE_MAX >> 1) || buf->len + count > buf->capacity) {
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
- return false;
- }
-
- memset(buf->buffer + buf->len, c, count);
- buf->len += count;
-
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
- return true;
-}
-
-/**
- * Writes a 16-bit integer in network byte order (big endian) to buffer.
- *
- * On success, returns true and updates the buffer length accordingly.
- * If there is insufficient space in the buffer, returns false, leaving the
- * buffer unchanged.
- */
-bool aws_byte_buf_write_be16(struct aws_byte_buf *buf, uint16_t x) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- x = aws_hton16(x);
- return aws_byte_buf_write(buf, (uint8_t *)&x, 2);
-}
-
-/**
- * Writes low 24-bits (3 bytes) of an unsigned integer in network byte order (big endian) to buffer.
- * Ex: If x is 0x00AABBCC then {0xAA, 0xBB, 0xCC} is written to buffer.
- *
- * On success, returns true and updates the buffer length accordingly.
- * If there is insufficient space in the buffer, or x's value cannot fit in 3 bytes,
- * returns false, leaving the buffer unchanged.
- */
-bool aws_byte_buf_write_be24(struct aws_byte_buf *buf, uint32_t x) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
-
- if (x > 0x00FFFFFF) {
- return false;
- }
-
- uint32_t be32 = aws_hton32(x);
- uint8_t *be32_bytes = (uint8_t *)&be32;
-
- /* write "lower" 3 bytes */
- return aws_byte_buf_write(buf, &be32_bytes[1], 3);
-}
-
-/**
- * Writes a 32-bit integer in network byte order (big endian) to buffer.
- *
- * On success, returns true and updates the buffer length accordingly.
- * If there is insufficient space in the buffer, returns false, leaving the
- * buffer unchanged.
- */
-bool aws_byte_buf_write_be32(struct aws_byte_buf *buf, uint32_t x) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- x = aws_hton32(x);
- return aws_byte_buf_write(buf, (uint8_t *)&x, 4);
-}
-
-/**
- * Writes a 32-bit float in network byte order (big endian) to buffer.
- *
- * On success, returns true and updates the buffer length accordingly.
- * If there is insufficient space in the buffer, returns false, leaving the
- * buffer unchanged.
- */
-bool aws_byte_buf_write_float_be32(struct aws_byte_buf *buf, float x) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- x = aws_htonf32(x);
- return aws_byte_buf_write(buf, (uint8_t *)&x, 4);
-}
-
-/**
- * Writes a 64-bit integer in network byte order (big endian) to buffer.
- *
- * On success, returns true and updates the buffer length accordingly.
- * If there is insufficient space in the buffer, returns false, leaving the
- * buffer unchanged.
- */
-bool aws_byte_buf_write_be64(struct aws_byte_buf *buf, uint64_t x) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- x = aws_hton64(x);
- return aws_byte_buf_write(buf, (uint8_t *)&x, 8);
-}
-
-/**
- * Writes a 64-bit float in network byte order (big endian) to buffer.
- *
- * On success, returns true and updates the buffer length accordingly.
- * If there is insufficient space in the buffer, returns false, leaving the
- * buffer unchanged.
- */
-bool aws_byte_buf_write_float_be64(struct aws_byte_buf *buf, double x) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- x = aws_htonf64(x);
- return aws_byte_buf_write(buf, (uint8_t *)&x, 8);
-}
-
-int aws_byte_buf_append_and_update(struct aws_byte_buf *to, struct aws_byte_cursor *from_and_update) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(to));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(from_and_update));
-
- if (aws_byte_buf_append(to, from_and_update)) {
- return AWS_OP_ERR;
- }
-
- from_and_update->ptr = to->buffer + (to->len - from_and_update->len);
- return AWS_OP_SUCCESS;
-}
-
-static struct aws_byte_cursor s_null_terminator_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\0");
-int aws_byte_buf_append_null_terminator(struct aws_byte_buf *buf) {
- return aws_byte_buf_append_dynamic(buf, &s_null_terminator_cursor);
-}
-
-bool aws_isalnum(uint8_t ch) {
- return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9');
-}
-
-bool aws_isalpha(uint8_t ch) {
- return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z');
-}
-
-bool aws_isdigit(uint8_t ch) {
- return (ch >= '0' && ch <= '9');
-}
-
-bool aws_isxdigit(uint8_t ch) {
- return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') || (ch >= 'A' && ch <= 'F');
-}
-
-bool aws_isspace(uint8_t ch) {
- switch (ch) {
- case 0x20: /* ' ' - space */
- return true;
- case 0x09: /* '\t' - horizontal tab */
- return true;
- case 0x0A: /* '\n' - line feed */
- return true;
- case 0x0B: /* '\v' - vertical tab */
- return true;
- case 0x0C: /* '\f' - form feed */
- return true;
- case 0x0D: /* '\r' - carriage return */
- return true;
- default:
- return false;
- }
-}
+ bool rval = (trimmed.len == 0);
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(source));
+ return rval;
+}
+
+int aws_byte_cursor_compare_lexical(const struct aws_byte_cursor *lhs, const struct aws_byte_cursor *rhs) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(lhs));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(rhs));
+ /* make sure we don't pass NULL pointers to memcmp */
+ AWS_PRECONDITION(lhs->ptr != NULL);
+ AWS_PRECONDITION(rhs->ptr != NULL);
+ size_t comparison_length = lhs->len;
+ if (comparison_length > rhs->len) {
+ comparison_length = rhs->len;
+ }
+
+ int result = memcmp(lhs->ptr, rhs->ptr, comparison_length);
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(lhs));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(rhs));
+ if (result != 0) {
+ return result;
+ }
+
+ if (lhs->len != rhs->len) {
+ return comparison_length == lhs->len ? -1 : 1;
+ }
+
+ return 0;
+}
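/*
 * Illustrative sketch, not part of the upstream source: lexical comparison
 * behaves like memcmp with shorter-prefix ordering, so it can back a
 * standard qsort() comparator over an array of cursors.
 */
static int s_example_compare_cursors(const void *a, const void *b) {
    const struct aws_byte_cursor *lhs = a;
    const struct aws_byte_cursor *rhs = b;
    return aws_byte_cursor_compare_lexical(lhs, rhs);
}
/* usage: qsort(cursors, count, sizeof(struct aws_byte_cursor), s_example_compare_cursors); */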
+
+int aws_byte_cursor_compare_lookup(
+ const struct aws_byte_cursor *lhs,
+ const struct aws_byte_cursor *rhs,
+ const uint8_t *lookup_table) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(lhs));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(rhs));
+ AWS_PRECONDITION(AWS_MEM_IS_READABLE(lookup_table, 256));
+ const uint8_t *lhs_curr = lhs->ptr;
+ const uint8_t *lhs_end = lhs_curr + lhs->len;
+
+ const uint8_t *rhs_curr = rhs->ptr;
+ const uint8_t *rhs_end = rhs_curr + rhs->len;
+
+ while (lhs_curr < lhs_end && rhs_curr < rhs_end) {
+ uint8_t lhc = lookup_table[*lhs_curr];
+ uint8_t rhc = lookup_table[*rhs_curr];
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(lhs));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(rhs));
+ if (lhc < rhc) {
+ return -1;
+ }
+
+ if (lhc > rhc) {
+ return 1;
+ }
+
+ lhs_curr++;
+ rhs_curr++;
+ }
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(lhs));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(rhs));
+ if (lhs_curr < lhs_end) {
+ return 1;
+ }
+
+ if (rhs_curr < rhs_end) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * For creating a byte buffer from a null-terminated string literal.
+ */
+struct aws_byte_buf aws_byte_buf_from_c_str(const char *c_str) {
+ struct aws_byte_buf buf;
+ buf.len = (!c_str) ? 0 : strlen(c_str);
+ buf.capacity = buf.len;
+ buf.buffer = (buf.capacity == 0) ? NULL : (uint8_t *)c_str;
+ buf.allocator = NULL;
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(&buf));
+ return buf;
+}
+
+struct aws_byte_buf aws_byte_buf_from_array(const void *bytes, size_t len) {
+ AWS_PRECONDITION(AWS_MEM_IS_WRITABLE(bytes, len), "Input array [bytes] must be writable up to [len] bytes.");
+ struct aws_byte_buf buf;
+ buf.buffer = (len > 0) ? (uint8_t *)bytes : NULL;
+ buf.len = len;
+ buf.capacity = len;
+ buf.allocator = NULL;
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(&buf));
+ return buf;
+}
+
+struct aws_byte_buf aws_byte_buf_from_empty_array(const void *bytes, size_t capacity) {
+ AWS_PRECONDITION(
+ AWS_MEM_IS_WRITABLE(bytes, capacity), "Input array [bytes] must be writable up to [capacity] bytes.");
+ struct aws_byte_buf buf;
+ buf.buffer = (capacity > 0) ? (uint8_t *)bytes : NULL;
+ buf.len = 0;
+ buf.capacity = capacity;
+ buf.allocator = NULL;
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(&buf));
+ return buf;
+}
+
+struct aws_byte_cursor aws_byte_cursor_from_buf(const struct aws_byte_buf *const buf) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ struct aws_byte_cursor cur;
+ cur.ptr = buf->buffer;
+ cur.len = buf->len;
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(&cur));
+ return cur;
+}
+
+struct aws_byte_cursor aws_byte_cursor_from_c_str(const char *c_str) {
+ struct aws_byte_cursor cur;
+ cur.ptr = (uint8_t *)c_str;
+ cur.len = (cur.ptr) ? strlen(c_str) : 0;
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(&cur));
+ return cur;
+}
+
+struct aws_byte_cursor aws_byte_cursor_from_array(const void *const bytes, const size_t len) {
+ AWS_PRECONDITION(len == 0 || AWS_MEM_IS_READABLE(bytes, len), "Input array [bytes] must be readable up to [len].");
+ struct aws_byte_cursor cur;
+ cur.ptr = (uint8_t *)bytes;
+ cur.len = len;
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(&cur));
+ return cur;
+}
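/*
 * Illustrative sketch, not part of the upstream source: the from_*
 * constructors create non-owning views, so no clean-up call is required and
 * the backing storage must outlive the view.
 */
static void s_example_views(void) {
    uint8_t storage[64];
    /* empty, writable buffer over stack storage; len starts at 0 */
    struct aws_byte_buf scratch = aws_byte_buf_from_empty_array(storage, sizeof(storage));
    /* read-only view over a string constant */
    struct aws_byte_cursor greeting = aws_byte_cursor_from_c_str("GET / HTTP/1.1");
    aws_byte_buf_write_from_whole_cursor(&scratch, greeting);
    (void)scratch;
}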
+
+#ifdef CBMC
+# pragma CPROVER check push
+# pragma CPROVER check disable "unsigned-overflow"
+#endif
+/**
+ * If index >= bound, bound > (SIZE_MAX / 2), or index > (SIZE_MAX / 2), returns
+ * 0. Otherwise, returns UINTPTR_MAX. This function is designed to return the correct
+ * value even under CPU speculation conditions, and is intended to be used for
+ * SPECTRE mitigation purposes.
+ */
+size_t aws_nospec_mask(size_t index, size_t bound) {
+ /*
+     * SPECTRE mitigation - we compute a mask that will be zero if the index is
+     * out of bounds (index >= bound), and all-ones otherwise, and AND it into the index.
+ * It is critical that we avoid any branches in this logic.
+ */
+
+ /*
+ * Hide the index value from the optimizer. This helps ensure that all this
+ * logic doesn't get eliminated.
+ */
+#if defined(__GNUC__) || defined(__clang__)
+ __asm__ __volatile__("" : "+r"(index));
+#endif
+#if defined(_MSVC_LANG)
+ /*
+ * MSVC doesn't have a good way for us to blind the optimizer, and doesn't
+ * even have inline asm on x64. Some experimentation indicates that this
+ * hack seems to confuse it sufficiently for our needs.
+ */
+ *((volatile uint8_t *)&index) += 0;
+#endif
+
+ /*
+ * If len > (SIZE_MAX / 2), then we can end up with len - buf->len being
+ * positive simply because the sign bit got inverted away. So we also check
+ * that the sign bit isn't set from the start.
+ *
+ * We also check that bound <= (SIZE_MAX / 2) to catch cases where the
+ * buffer is _already_ out of bounds.
+ */
+ size_t negative_mask = index | bound;
+ size_t toobig_mask = bound - index - (uintptr_t)1;
+ size_t combined_mask = negative_mask | toobig_mask;
+
+ /*
+ * combined_mask needs to have its sign bit OFF for us to be in range.
+ * We'd like to expand this to a mask we can AND into our index, so flip
+ * that bit (and everything else), shift it over so it's the only bit in the
+ * ones position, and multiply across the entire register.
+ *
+ * First, extract the (inverse) top bit and move it to the lowest bit.
+ * Because there's no standard SIZE_BIT in C99, we'll divide by a mask with
+ * just the top bit set instead.
+ */
+
+ combined_mask = (~combined_mask) / (SIZE_MAX - (SIZE_MAX >> 1));
+
+ /*
+ * Now multiply it to replicate it across all bits.
+ *
+ * Note that GCC is smart enough to optimize the divide-and-multiply into
+ * an arithmetic right shift operation on x86.
+ */
+ combined_mask = combined_mask * UINTPTR_MAX;
+
+ return combined_mask;
+}
+#ifdef CBMC
+# pragma CPROVER check pop
+#endif
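/*
 * Illustrative sketch, not part of the upstream source: clamping an
 * attacker-influenced index with aws_nospec_mask() before an array access,
 * so a mispredicted bounds check cannot speculatively read out of bounds.
 */
static uint8_t s_example_masked_lookup(const uint8_t *table, size_t table_len, size_t untrusted_index) {
    if (untrusted_index >= table_len) {
        return 0;
    }
    /* mask is all-ones when the index is in range, zero otherwise */
    size_t mask = aws_nospec_mask(untrusted_index, table_len);
    return table[untrusted_index & mask];
}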
+
+/**
+ * Tests if the given aws_byte_cursor has at least len bytes remaining. If so,
+ * *buf is advanced by len bytes (incrementing ->ptr and decrementing ->len),
+ * and an aws_byte_cursor referring to the first len bytes of the original *buf
+ * is returned. Otherwise, an aws_byte_cursor with ->ptr = NULL, ->len = 0 is
+ * returned.
+ *
+ * Note that if len is above (SIZE_MAX / 2), this function will also treat it as
+ * a buffer overflow, and return NULL without changing *buf.
+ */
+struct aws_byte_cursor aws_byte_cursor_advance(struct aws_byte_cursor *const cursor, const size_t len) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor));
+ struct aws_byte_cursor rv;
+ if (cursor->len > (SIZE_MAX >> 1) || len > (SIZE_MAX >> 1) || len > cursor->len) {
+ rv.ptr = NULL;
+ rv.len = 0;
+ } else {
+ rv.ptr = cursor->ptr;
+ rv.len = len;
+
+ cursor->ptr += len;
+ cursor->len -= len;
+ }
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(&rv));
+ return rv;
+}
+
+/**
+ * Behaves identically to aws_byte_cursor_advance, but avoids speculative
+ * execution potentially reading out-of-bounds pointers (by returning an
+ * empty ptr in such speculated paths).
+ *
+ * This should generally be done when using an untrusted or
+ * data-dependent value for 'len', to avoid speculating into a path where
+ * cursor->ptr points outside the true ptr length.
+ */
+
+struct aws_byte_cursor aws_byte_cursor_advance_nospec(struct aws_byte_cursor *const cursor, size_t len) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor));
+
+ struct aws_byte_cursor rv;
+
+ if (len <= cursor->len && len <= (SIZE_MAX >> 1) && cursor->len <= (SIZE_MAX >> 1)) {
+ /*
+ * If we're speculating past a failed bounds check, null out the pointer. This ensures
+ * that we don't try to read past the end of the buffer and leak information about other
+ * memory through timing side-channels.
+ */
+ uintptr_t mask = aws_nospec_mask(len, cursor->len + 1);
+
+ /* Make sure we don't speculate-underflow len either */
+ len = len & mask;
+ cursor->ptr = (uint8_t *)((uintptr_t)cursor->ptr & mask);
+ /* Make sure subsequent nospec accesses don't advance ptr past NULL */
+ cursor->len = cursor->len & mask;
+
+ rv.ptr = cursor->ptr;
+ /* Make sure anything acting upon the returned cursor _also_ doesn't advance past NULL */
+ rv.len = len & mask;
+
+ cursor->ptr += len;
+ cursor->len -= len;
+ } else {
+ rv.ptr = NULL;
+ rv.len = 0;
+ }
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor));
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(&rv));
+ return rv;
+}
+
+/**
+ * Reads specified length of data from byte cursor and copies it to the
+ * destination array.
+ *
+ * On success, returns true and updates the cursor pointer/length accordingly.
+ * If there is insufficient space in the cursor, returns false, leaving the
+ * cursor unchanged.
+ */
+bool aws_byte_cursor_read(struct aws_byte_cursor *AWS_RESTRICT cur, void *AWS_RESTRICT dest, const size_t len) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
+ AWS_PRECONDITION(AWS_MEM_IS_WRITABLE(dest, len));
+ if (len == 0) {
+ return true;
+ }
+
+ struct aws_byte_cursor slice = aws_byte_cursor_advance_nospec(cur, len);
+
+ if (slice.ptr) {
+ memcpy(dest, slice.ptr, len);
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
+ AWS_POSTCONDITION(AWS_MEM_IS_READABLE(dest, len));
+ return true;
+ }
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
+ return false;
+}
+
+/**
+ * Reads as many bytes from cursor as size of buffer, and copies them to buffer.
+ *
+ * On success, returns true and updates the cursor pointer/length accordingly.
+ * If there is insufficient space in the cursor, returns false, leaving the
+ * cursor unchanged.
+ */
+bool aws_byte_cursor_read_and_fill_buffer(
+ struct aws_byte_cursor *AWS_RESTRICT cur,
+ struct aws_byte_buf *AWS_RESTRICT dest) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
+ AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
+ if (aws_byte_cursor_read(cur, dest->buffer, dest->capacity)) {
+ dest->len = dest->capacity;
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return true;
+ }
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return false;
+}
+
+/**
+ * Reads a single byte from cursor, placing it in *var.
+ *
+ * On success, returns true and updates the cursor pointer/length accordingly.
+ * If there is insufficient space in the cursor, returns false, leaving the
+ * cursor unchanged.
+ */
+bool aws_byte_cursor_read_u8(struct aws_byte_cursor *AWS_RESTRICT cur, uint8_t *AWS_RESTRICT var) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
+ AWS_PRECONDITION(AWS_MEM_IS_WRITABLE(var, 1));
+ bool rv = aws_byte_cursor_read(cur, var, 1);
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
+ return rv;
+}
+
+/**
+ * Reads a 16-bit value in network byte order from cur, and places it in host
+ * byte order into var.
+ *
+ * On success, returns true and updates the cursor pointer/length accordingly.
+ * If there is insufficient space in the cursor, returns false, leaving the
+ * cursor unchanged.
+ */
+bool aws_byte_cursor_read_be16(struct aws_byte_cursor *cur, uint16_t *var) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
+ AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
+ bool rv = aws_byte_cursor_read(cur, var, 2);
+
+ if (AWS_LIKELY(rv)) {
+ *var = aws_ntoh16(*var);
+ }
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
+ return rv;
+}
+
+/**
+ * Reads an unsigned 24-bit value (3 bytes) in network byte order from cur,
+ * and places it in host byte order into 32-bit var.
+ * Ex: if cur's next 3 bytes are {0xAA, 0xBB, 0xCC}, then var becomes 0x00AABBCC.
+ *
+ * On success, returns true and updates the cursor pointer/length accordingly.
+ * If there is insufficient space in the cursor, returns false, leaving the
+ * cursor unchanged.
+ */
+bool aws_byte_cursor_read_be24(struct aws_byte_cursor *cur, uint32_t *var) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
+ AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
+
+ uint8_t *var_bytes = (void *)var;
+
+ /* read into "lower" 3 bytes */
+ bool rv = aws_byte_cursor_read(cur, &var_bytes[1], 3);
+
+ if (AWS_LIKELY(rv)) {
+ /* zero out "highest" 4th byte*/
+ var_bytes[0] = 0;
+
+ *var = aws_ntoh32(*var);
+ }
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
+ return rv;
+}
+
+/**
+ * Reads a 32-bit value in network byte order from cur, and places it in host
+ * byte order into var.
+ *
+ * On success, returns true and updates the cursor pointer/length accordingly.
+ * If there is insufficient space in the cursor, returns false, leaving the
+ * cursor unchanged.
+ */
+bool aws_byte_cursor_read_be32(struct aws_byte_cursor *cur, uint32_t *var) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
+ AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
+ bool rv = aws_byte_cursor_read(cur, var, 4);
+
+ if (AWS_LIKELY(rv)) {
+ *var = aws_ntoh32(*var);
+ }
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
+ return rv;
+}
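/*
 * Illustrative parsing sketch over a hypothetical wire format, not part of
 * the upstream source: a 16-bit type tag, a 32-bit big-endian payload
 * length, then that many payload bytes. Note that on failure the input
 * cursor may already be partially advanced.
 */
static bool s_example_parse_frame(
    struct aws_byte_cursor *input,
    uint16_t *out_type,
    struct aws_byte_cursor *out_payload) {
    uint32_t payload_len = 0;
    if (!aws_byte_cursor_read_be16(input, out_type) || !aws_byte_cursor_read_be32(input, &payload_len)) {
        return false;
    }
    /* nospec advance returns a NULL/0 cursor if payload_len exceeds the remaining bytes */
    *out_payload = aws_byte_cursor_advance_nospec(input, payload_len);
    return out_payload->ptr != NULL || payload_len == 0;
}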
+
+/**
+ * Reads a 32-bit float in network byte order from cur, and places it in host
+ * byte order into var.
+ *
+ * On success, returns true and updates the cursor pointer/length accordingly.
+ * If there is insufficient space in the cursor, returns false, leaving the
+ * cursor unchanged.
+ */
+bool aws_byte_cursor_read_float_be32(struct aws_byte_cursor *cur, float *var) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
+ AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
+ bool rv = aws_byte_cursor_read(cur, var, sizeof(float));
+
+ if (AWS_LIKELY(rv)) {
+ *var = aws_ntohf32(*var);
+ }
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
+ return rv;
+}
+
+/**
+ * Reads a 64-bit float in network byte order from cur, and places it in host
+ * byte order into var.
+ *
+ * On success, returns true and updates the cursor pointer/length accordingly.
+ * If there is insufficient space in the cursor, returns false, leaving the
+ * cursor unchanged.
+ */
+bool aws_byte_cursor_read_float_be64(struct aws_byte_cursor *cur, double *var) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
+ AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
+ bool rv = aws_byte_cursor_read(cur, var, sizeof(double));
+
+ if (AWS_LIKELY(rv)) {
+ *var = aws_ntohf64(*var);
+ }
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
+ return rv;
+}
+
+/**
+ * Reads a 64-bit value in network byte order from cur, and places it in host
+ * byte order into var.
+ *
+ * On success, returns true and updates the cursor pointer/length accordingly.
+ * If there is insufficient space in the cursor, returns false, leaving the
+ * cursor unchanged.
+ */
+bool aws_byte_cursor_read_be64(struct aws_byte_cursor *cur, uint64_t *var) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
+ AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
+ bool rv = aws_byte_cursor_read(cur, var, sizeof(*var));
+
+ if (AWS_LIKELY(rv)) {
+ *var = aws_ntoh64(*var);
+ }
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
+ return rv;
+}
+
+/* Lookup from '0' -> 0, 'f' -> 0xf, 'F' -> 0xF, etc
+ * invalid characters have value 255 */
+/* clang-format off */
+static const uint8_t s_hex_to_num_table[] = {
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255,
+ /* 0 - 9 */
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 255, 255, 255, 255, 255, 255, 255,
+ /* A - F */
+ 0xA, 0xB, 0xC, 0xD, 0xE, 0xF,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255,
+ /* a - f */
+ 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+};
+AWS_STATIC_ASSERT(AWS_ARRAY_SIZE(s_hex_to_num_table) == 256);
+/* clang-format on */
+
+const uint8_t *aws_lookup_table_hex_to_num_get(void) {
+ return s_hex_to_num_table;
+}
+
+bool aws_byte_cursor_read_hex_u8(struct aws_byte_cursor *cur, uint8_t *var) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(cur));
+ AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var));
+
+ bool success = false;
+ if (AWS_LIKELY(cur->len >= 2)) {
+ const uint8_t hi = s_hex_to_num_table[cur->ptr[0]];
+ const uint8_t lo = s_hex_to_num_table[cur->ptr[1]];
+
+ /* table maps invalid characters to 255 */
+ if (AWS_LIKELY(hi != 255 && lo != 255)) {
+ *var = (hi << 4) | lo;
+ cur->ptr += 2;
+ cur->len -= 2;
+ success = true;
+ }
+ }
+
+ AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur));
+ return success;
+}
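/*
 * Illustrative sketch, not part of the upstream source: decoding an ASCII
 * hex string such as "deadbeef" into raw bytes, one output byte per two hex
 * digits, using the lookup-table-backed reader above.
 */
static bool s_example_decode_hex(struct aws_byte_cursor hex, struct aws_byte_buf *out) {
    while (hex.len >= 2) {
        uint8_t byte = 0;
        if (!aws_byte_cursor_read_hex_u8(&hex, &byte) || !aws_byte_buf_write_u8(out, byte)) {
            return false;
        }
    }
    return hex.len == 0; /* an odd trailing digit is treated as an error */
}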
+
+/**
+ * Appends a sub-buffer to the specified buffer.
+ *
+ * If the buffer has at least `len' bytes remaining (buffer->capacity - buffer->len >= len),
+ * then buffer->len is incremented by len, and an aws_byte_buf is assigned to *output corresponding
+ * to the last len bytes of the input buffer. The aws_byte_buf at *output will have a null
+ * allocator, a zero initial length, and a capacity of 'len'. The function then returns true.
+ *
+ * If there is insufficient space, then this function nulls all fields in *output and returns
+ * false.
+ */
+bool aws_byte_buf_advance(
+ struct aws_byte_buf *const AWS_RESTRICT buffer,
+ struct aws_byte_buf *const AWS_RESTRICT output,
+ const size_t len) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buffer));
+ AWS_PRECONDITION(aws_byte_buf_is_valid(output));
+ if (buffer->capacity - buffer->len >= len) {
+ *output = aws_byte_buf_from_array(buffer->buffer + buffer->len, len);
+ buffer->len += len;
+ output->len = 0;
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(output));
+ return true;
+ } else {
+ AWS_ZERO_STRUCT(*output);
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(output));
+ return false;
+ }
+}
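/*
 * Illustrative sketch, not part of the upstream source: reserving a
 * fixed-size slice at the front of a buffer (e.g. for a length field filled
 * in later) while continuing to append the body after it.
 */
static bool s_example_carve_header(struct aws_byte_buf *message, struct aws_byte_buf *out_header) {
    /* claims the next 4 bytes of `message`; out_header->capacity == 4, len == 0 */
    if (!aws_byte_buf_advance(message, out_header, 4)) {
        return false;
    }
    /* body bytes written to `message` now land after the reserved header region */
    return aws_byte_buf_write_u8(message, 0x01);
}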
+
+/**
+ * Write specified number of bytes from array to byte buffer.
+ *
+ * On success, returns true and updates the buffer length accordingly.
+ * If there is insufficient space in the buffer, returns false, leaving the
+ * buffer unchanged.
+ */
+bool aws_byte_buf_write(struct aws_byte_buf *AWS_RESTRICT buf, const uint8_t *AWS_RESTRICT src, size_t len) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ AWS_PRECONDITION(AWS_MEM_IS_READABLE(src, len), "Input array [src] must be readable up to [len] bytes.");
+
+ if (len == 0) {
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
+ return true;
+ }
+
+ if (buf->len > (SIZE_MAX >> 1) || len > (SIZE_MAX >> 1) || buf->len + len > buf->capacity) {
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
+ return false;
+ }
+
+ memcpy(buf->buffer + buf->len, src, len);
+ buf->len += len;
+
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
+ return true;
+}
+
+/**
+ * Copies all bytes from the source buffer to the destination buffer.
+ *
+ * On success, returns true and updates the destination buffer's length accordingly.
+ * If there is insufficient space in the buffer, returns false, leaving the
+ * buffer unchanged.
+ */
+bool aws_byte_buf_write_from_whole_buffer(struct aws_byte_buf *AWS_RESTRICT buf, struct aws_byte_buf src) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ AWS_PRECONDITION(aws_byte_buf_is_valid(&src));
+ return aws_byte_buf_write(buf, src.buffer, src.len);
+}
+
+/**
+ * Copies all bytes from the cursor to the buffer.
+ *
+ * On success, returns true and updates the buffer's length accordingly.
+ * If there is insufficient space in the buffer, returns false, leaving the
+ * buffer unchanged.
+ */
+bool aws_byte_buf_write_from_whole_cursor(struct aws_byte_buf *AWS_RESTRICT buf, struct aws_byte_cursor src) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&src));
+ return aws_byte_buf_write(buf, src.ptr, src.len);
+}
+
+struct aws_byte_cursor aws_byte_buf_write_to_capacity(
+ struct aws_byte_buf *buf,
+ struct aws_byte_cursor *advancing_cursor) {
+
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(advancing_cursor));
+
+ size_t available = buf->capacity - buf->len;
+ size_t write_size = aws_min_size(available, advancing_cursor->len);
+ struct aws_byte_cursor write_cursor = aws_byte_cursor_advance(advancing_cursor, write_size);
+ aws_byte_buf_write_from_whole_cursor(buf, write_cursor);
+ return write_cursor;
+}
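/*
 * Illustrative sketch, not part of the upstream source: draining a large
 * cursor through a small fixed-size staging buffer in capacity-sized chunks,
 * as a hypothetical chunk-oriented consumer might require.
 */
static void s_example_drain_in_chunks(struct aws_byte_cursor source) {
    uint8_t storage[256];
    while (source.len > 0) {
        struct aws_byte_buf chunk = aws_byte_buf_from_empty_array(storage, sizeof(storage));
        /* copies at most 256 bytes and advances `source` by the amount written */
        struct aws_byte_cursor written = aws_byte_buf_write_to_capacity(&chunk, &source);
        (void)written;
        /* ... hand `chunk` to the consumer here ... */
    }
}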
+
+/**
+ * Copies one byte to buffer.
+ *
+ * On success, returns true and updates the buffer length accordingly.
+ * If there is insufficient space in the buffer, returns false, leaving the
+ * buffer unchanged.
+ */
+bool aws_byte_buf_write_u8(struct aws_byte_buf *AWS_RESTRICT buf, uint8_t c) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ return aws_byte_buf_write(buf, &c, 1);
+}
+
+/**
+ * Writes one byte repeatedly to buffer (like memset)
+ *
+ * If there is insufficient space in the buffer, returns false, leaving the
+ * buffer unchanged.
+ */
+bool aws_byte_buf_write_u8_n(struct aws_byte_buf *buf, uint8_t c, size_t count) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+
+ if (buf->len > (SIZE_MAX >> 1) || count > (SIZE_MAX >> 1) || buf->len + count > buf->capacity) {
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
+ return false;
+ }
+
+ memset(buf->buffer + buf->len, c, count);
+ buf->len += count;
+
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
+ return true;
+}
+
+/**
+ * Writes a 16-bit integer in network byte order (big endian) to buffer.
+ *
+ * On success, returns true and updates the buffer length accordingly.
+ * If there is insufficient space in the buffer, returns false, leaving the
+ * buffer unchanged.
+ */
+bool aws_byte_buf_write_be16(struct aws_byte_buf *buf, uint16_t x) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ x = aws_hton16(x);
+ return aws_byte_buf_write(buf, (uint8_t *)&x, 2);
+}
+
+/**
+ * Writes low 24-bits (3 bytes) of an unsigned integer in network byte order (big endian) to buffer.
+ * Ex: If x is 0x00AABBCC then {0xAA, 0xBB, 0xCC} is written to buffer.
+ *
+ * On success, returns true and updates the buffer length accordingly.
+ * If there is insufficient space in the buffer, or x's value cannot fit in 3 bytes,
+ * returns false, leaving the buffer unchanged.
+ */
+bool aws_byte_buf_write_be24(struct aws_byte_buf *buf, uint32_t x) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+
+ if (x > 0x00FFFFFF) {
+ return false;
+ }
+
+ uint32_t be32 = aws_hton32(x);
+ uint8_t *be32_bytes = (uint8_t *)&be32;
+
+ /* write "lower" 3 bytes */
+ return aws_byte_buf_write(buf, &be32_bytes[1], 3);
+}
+
+/**
+ * Writes a 32-bit integer in network byte order (big endian) to buffer.
+ *
+ * On success, returns true and updates the buffer length accordingly.
+ * If there is insufficient space in the buffer, returns false, leaving the
+ * buffer unchanged.
+ */
+bool aws_byte_buf_write_be32(struct aws_byte_buf *buf, uint32_t x) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ x = aws_hton32(x);
+ return aws_byte_buf_write(buf, (uint8_t *)&x, 4);
+}
+
+/**
+ * Writes a 32-bit float in network byte order (big endian) to buffer.
+ *
+ * On success, returns true and updates the buffer length accordingly.
+ * If there is insufficient space in the buffer, returns false, leaving the
+ * buffer unchanged.
+ */
+bool aws_byte_buf_write_float_be32(struct aws_byte_buf *buf, float x) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ x = aws_htonf32(x);
+ return aws_byte_buf_write(buf, (uint8_t *)&x, 4);
+}
+
+/**
+ * Writes a 64-bit integer in network byte order (big endian) to buffer.
+ *
+ * On success, returns true and updates the buffer length accordingly.
+ * If there is insufficient space in the buffer, returns false, leaving the
+ * buffer unchanged.
+ */
+bool aws_byte_buf_write_be64(struct aws_byte_buf *buf, uint64_t x) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ x = aws_hton64(x);
+ return aws_byte_buf_write(buf, (uint8_t *)&x, 8);
+}
+
+/**
+ * Writes a 64-bit float in network byte order (big endian) to buffer.
+ *
+ * On success, returns true and updates the buffer length accordingly.
+ * If there is insufficient space in the buffer, returns false, leaving the
+ * buffer unchanged.
+ */
+bool aws_byte_buf_write_float_be64(struct aws_byte_buf *buf, double x) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ x = aws_htonf64(x);
+ return aws_byte_buf_write(buf, (uint8_t *)&x, 8);
+}
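/*
 * Illustrative sketch over a hypothetical record layout, not part of the
 * upstream source: serializing a small fixed-width record in network byte
 * order. Each write returns false on insufficient space; earlier successful
 * writes are not rolled back.
 */
static bool s_example_write_record(struct aws_byte_buf *out, uint16_t version, uint32_t id, uint64_t timestamp) {
    return aws_byte_buf_write_be16(out, version) && aws_byte_buf_write_be32(out, id) &&
           aws_byte_buf_write_be64(out, timestamp);
}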
+
+int aws_byte_buf_append_and_update(struct aws_byte_buf *to, struct aws_byte_cursor *from_and_update) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(to));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(from_and_update));
+
+ if (aws_byte_buf_append(to, from_and_update)) {
+ return AWS_OP_ERR;
+ }
+
+ from_and_update->ptr = to->buffer + (to->len - from_and_update->len);
+ return AWS_OP_SUCCESS;
+}
+
+static struct aws_byte_cursor s_null_terminator_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\0");
+int aws_byte_buf_append_null_terminator(struct aws_byte_buf *buf) {
+ return aws_byte_buf_append_dynamic(buf, &s_null_terminator_cursor);
+}
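/*
 * Illustrative sketch, not part of the upstream source: assembling a
 * NUL-terminated string in a dynamic buffer so out->buffer can be handed to
 * APIs expecting a C string. Assumes an allocator-backed buffer; out->len
 * includes the terminator after the final append.
 */
static int s_example_build_c_str(struct aws_allocator *alloc, struct aws_byte_buf *out) {
    if (aws_byte_buf_init(out, alloc, 16)) {
        return AWS_OP_ERR;
    }
    struct aws_byte_cursor prefix = aws_byte_cursor_from_c_str("id=");
    struct aws_byte_cursor value = aws_byte_cursor_from_c_str("42");
    if (aws_byte_buf_append_dynamic(out, &prefix) || aws_byte_buf_append_dynamic(out, &value) ||
        aws_byte_buf_append_null_terminator(out)) {
        aws_byte_buf_clean_up(out);
        return AWS_OP_ERR;
    }
    return AWS_OP_SUCCESS; /* out->buffer now holds "id=42\0" */
}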
+
+bool aws_isalnum(uint8_t ch) {
+ return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9');
+}
+
+bool aws_isalpha(uint8_t ch) {
+ return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z');
+}
+
+bool aws_isdigit(uint8_t ch) {
+ return (ch >= '0' && ch <= '9');
+}
+
+bool aws_isxdigit(uint8_t ch) {
+ return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') || (ch >= 'A' && ch <= 'F');
+}
+
+bool aws_isspace(uint8_t ch) {
+ switch (ch) {
+ case 0x20: /* ' ' - space */
+ return true;
+ case 0x09: /* '\t' - horizontal tab */
+ return true;
+ case 0x0A: /* '\n' - line feed */
+ return true;
+ case 0x0B: /* '\v' - vertical tab */
+ return true;
+ case 0x0C: /* '\f' - form feed */
+ return true;
+ case 0x0D: /* '\r' - carriage return */
+ return true;
+ default:
+ return false;
+ }
+}
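
For reference, the be24 writer above range-checks the value, byte-swaps it with aws_hton32(), and then copies bytes 1..3 of the swapped word, skipping the always-zero most significant byte. The stand-alone C sketch below reproduces the same wire layout with plain shifts; it is illustrative only and is not part of aws-c-common.

/* Minimal sketch of the 24-bit network-byte-order layout used by
 * aws_byte_buf_write_be24(); plain shifts instead of aws_hton32(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool write_be24(uint8_t out[3], uint32_t x) {
    if (x > 0x00FFFFFFu) {
        return false; /* value does not fit in 3 bytes */
    }
    out[0] = (uint8_t)(x >> 16); /* most significant byte first */
    out[1] = (uint8_t)(x >> 8);
    out[2] = (uint8_t)(x);
    return true;
}

int main(void) {
    uint8_t buf[3] = {0};
    if (write_be24(buf, 0x00AABBCCu)) {
        printf("%02X %02X %02X\n", buf[0], buf[1], buf[2]); /* AA BB CC */
    }
    return 0;
}
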
diff --git a/contrib/restricted/aws/aws-c-common/source/cache.c b/contrib/restricted/aws/aws-c-common/source/cache.c
index eddc4f837f..058845d106 100644
--- a/contrib/restricted/aws/aws-c-common/source/cache.c
+++ b/contrib/restricted/aws/aws-c-common/source/cache.c
@@ -1,60 +1,60 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-#include <aws/common/cache.h>
-
-void aws_cache_destroy(struct aws_cache *cache) {
- AWS_PRECONDITION(cache);
- cache->vtable->destroy(cache);
-}
-
-int aws_cache_find(struct aws_cache *cache, const void *key, void **p_value) {
- AWS_PRECONDITION(cache);
- return cache->vtable->find(cache, key, p_value);
-}
-
-int aws_cache_put(struct aws_cache *cache, const void *key, void *p_value) {
- AWS_PRECONDITION(cache);
- return cache->vtable->put(cache, key, p_value);
-}
-
-int aws_cache_remove(struct aws_cache *cache, const void *key) {
- AWS_PRECONDITION(cache);
- return cache->vtable->remove(cache, key);
-}
-
-void aws_cache_clear(struct aws_cache *cache) {
- AWS_PRECONDITION(cache);
- cache->vtable->clear(cache);
-}
-
-size_t aws_cache_get_element_count(const struct aws_cache *cache) {
- AWS_PRECONDITION(cache);
- return cache->vtable->get_element_count(cache);
-}
-
-void aws_cache_base_default_destroy(struct aws_cache *cache) {
- aws_linked_hash_table_clean_up(&cache->table);
- aws_mem_release(cache->allocator, cache);
-}
-
-int aws_cache_base_default_find(struct aws_cache *cache, const void *key, void **p_value) {
- return (aws_linked_hash_table_find(&cache->table, key, p_value));
-}
-
-int aws_cache_base_default_remove(struct aws_cache *cache, const void *key) {
- /* allocated cache memory and the linked list entry will be removed in the
- * callback. */
- return aws_linked_hash_table_remove(&cache->table, key);
-}
-
-void aws_cache_base_default_clear(struct aws_cache *cache) {
- /* clearing the table will remove all elements. That will also deallocate
- * any cache entries we currently have. */
- aws_linked_hash_table_clear(&cache->table);
-}
-
-size_t aws_cache_base_default_get_element_count(const struct aws_cache *cache) {
- return aws_linked_hash_table_get_element_count(&cache->table);
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/cache.h>
+
+void aws_cache_destroy(struct aws_cache *cache) {
+ AWS_PRECONDITION(cache);
+ cache->vtable->destroy(cache);
+}
+
+int aws_cache_find(struct aws_cache *cache, const void *key, void **p_value) {
+ AWS_PRECONDITION(cache);
+ return cache->vtable->find(cache, key, p_value);
+}
+
+int aws_cache_put(struct aws_cache *cache, const void *key, void *p_value) {
+ AWS_PRECONDITION(cache);
+ return cache->vtable->put(cache, key, p_value);
+}
+
+int aws_cache_remove(struct aws_cache *cache, const void *key) {
+ AWS_PRECONDITION(cache);
+ return cache->vtable->remove(cache, key);
+}
+
+void aws_cache_clear(struct aws_cache *cache) {
+ AWS_PRECONDITION(cache);
+ cache->vtable->clear(cache);
+}
+
+size_t aws_cache_get_element_count(const struct aws_cache *cache) {
+ AWS_PRECONDITION(cache);
+ return cache->vtable->get_element_count(cache);
+}
+
+void aws_cache_base_default_destroy(struct aws_cache *cache) {
+ aws_linked_hash_table_clean_up(&cache->table);
+ aws_mem_release(cache->allocator, cache);
+}
+
+int aws_cache_base_default_find(struct aws_cache *cache, const void *key, void **p_value) {
+ return (aws_linked_hash_table_find(&cache->table, key, p_value));
+}
+
+int aws_cache_base_default_remove(struct aws_cache *cache, const void *key) {
+ /* allocated cache memory and the linked list entry will be removed in the
+ * callback. */
+ return aws_linked_hash_table_remove(&cache->table, key);
+}
+
+void aws_cache_base_default_clear(struct aws_cache *cache) {
+ /* clearing the table will remove all elements. That will also deallocate
+ * any cache entries we currently have. */
+ aws_linked_hash_table_clear(&cache->table);
+}
+
+size_t aws_cache_base_default_get_element_count(const struct aws_cache *cache) {
+ return aws_linked_hash_table_get_element_count(&cache->table);
+}
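
The aws_cache functions above are thin forwarders: every public call dispatches through cache->vtable, and the *_base_default_* functions supply the shared linked-hash-table behaviour that the concrete caches (FIFO, LRU, LIFO) reuse. Below is a minimal sketch of that vtable-dispatch pattern; the names (my_counter and friends) are hypothetical and are not aws-c-common identifiers.

#include <stddef.h>
#include <stdio.h>

struct my_counter;

/* table of function pointers, analogous to struct aws_cache_vtable */
struct my_counter_vtable {
    void (*increment)(struct my_counter *c);
    size_t (*count)(const struct my_counter *c);
};

struct my_counter {
    const struct my_counter_vtable *vtable;
    size_t value;
};

/* public API forwards through the vtable, like aws_cache_put()/aws_cache_find() */
static void my_counter_increment(struct my_counter *c) {
    c->vtable->increment(c);
}
static size_t my_counter_count(const struct my_counter *c) {
    return c->vtable->count(c);
}

/* one concrete implementation, analogous to the default cache behaviour */
static void s_default_increment(struct my_counter *c) {
    c->value++;
}
static size_t s_default_count(const struct my_counter *c) {
    return c->value;
}
static const struct my_counter_vtable s_default_vtable = {
    .increment = s_default_increment,
    .count = s_default_count,
};

int main(void) {
    struct my_counter c = {.vtable = &s_default_vtable, .value = 0};
    my_counter_increment(&c);
    printf("%zu\n", my_counter_count(&c)); /* prints 1 */
    return 0;
}
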
diff --git a/contrib/restricted/aws/aws-c-common/source/codegen.c b/contrib/restricted/aws/aws-c-common/source/codegen.c
index 18e702f428..ea6e95d548 100644
--- a/contrib/restricted/aws/aws-c-common/source/codegen.c
+++ b/contrib/restricted/aws/aws-c-common/source/codegen.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
/*
@@ -9,14 +9,14 @@
#define AWS_STATIC_IMPL AWS_COMMON_API
-#include <aws/common/array_list.inl>
-#include <aws/common/atomics.inl>
-#include <aws/common/byte_order.inl>
-#include <aws/common/clock.inl>
-#include <aws/common/encoding.inl>
-#include <aws/common/error.inl>
-#include <aws/common/linked_list.inl>
-#include <aws/common/math.inl>
-#include <aws/common/ring_buffer.inl>
-#include <aws/common/string.inl>
-#include <aws/common/zero.inl>
+#include <aws/common/array_list.inl>
+#include <aws/common/atomics.inl>
+#include <aws/common/byte_order.inl>
+#include <aws/common/clock.inl>
+#include <aws/common/encoding.inl>
+#include <aws/common/error.inl>
+#include <aws/common/linked_list.inl>
+#include <aws/common/math.inl>
+#include <aws/common/ring_buffer.inl>
+#include <aws/common/string.inl>
+#include <aws/common/zero.inl>
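
codegen.c exists to give the header-only .inl functions exactly one exported, out-of-line definition: it defines AWS_STATIC_IMPL as AWS_COMMON_API before including the .inl headers, so this translation unit emits real symbols while every other includer keeps the static-inline versions. A single-file sketch of that pattern follows; MY_STATIC_IMPL and my_add are hypothetical names, not the library's macros.

#include <stdio.h>

/* What an .inl-style header looks like: the includer injects the linkage. */
#ifndef MY_STATIC_IMPL
#    define MY_STATIC_IMPL static inline
#endif

MY_STATIC_IMPL int my_add(int a, int b) {
    return a + b;
}

/* A codegen-style .c file would instead do, before including the header:
 *     #define MY_STATIC_IMPL MY_API   (e.g. an export/visibility macro)
 * so that exactly one translation unit provides the external definitions. */

int main(void) {
    printf("%d\n", my_add(2, 3)); /* prints 5 */
    return 0;
}
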
diff --git a/contrib/restricted/aws/aws-c-common/source/command_line_parser.c b/contrib/restricted/aws/aws-c-common/source/command_line_parser.c
index 91a3769236..ccbe6d1820 100644
--- a/contrib/restricted/aws/aws-c-common/source/command_line_parser.c
+++ b/contrib/restricted/aws/aws-c-common/source/command_line_parser.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/command_line_parser.h>
@@ -85,17 +85,17 @@ int aws_cli_getopt_long(
if (option) {
bool has_arg = false;
- char *opt_value = memchr(optstring, option->val, strlen(optstring));
- if (!opt_value) {
- return '?';
+ char *opt_value = memchr(optstring, option->val, strlen(optstring));
+ if (!opt_value) {
+ return '?';
+ }
+
+ if (opt_value[1] == ':') {
+ has_arg = true;
}
- if (opt_value[1] == ':') {
- has_arg = true;
- }
-
if (has_arg) {
- if (aws_cli_optind >= argc) {
+ if (aws_cli_optind >= argc) {
return '?';
}
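
The hunk above is where aws_cli_getopt_long() decides whether an option takes an argument: it locates the option's short character in optstring with memchr() and treats a following ':' as "takes a value", which is the classic getopt optstring convention. A small stand-alone sketch of that convention (illustrative, not the library's code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Returns true if 'opt' is declared in optstring and is followed by ':'. */
static bool option_has_arg(const char *optstring, char opt) {
    const char *p = memchr(optstring, opt, strlen(optstring));
    if (!p) {
        return false; /* unknown option; the parser above returns '?' here */
    }
    return p[1] == ':';
}

int main(void) {
    const char *optstring = "ab:c"; /* -a and -c are flags, -b takes a value */
    printf("a:%d b:%d c:%d\n",
           option_has_arg(optstring, 'a'),
           option_has_arg(optstring, 'b'),
           option_has_arg(optstring, 'c')); /* prints a:0 b:1 c:0 */
    return 0;
}
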
diff --git a/contrib/restricted/aws/aws-c-common/source/common.c b/contrib/restricted/aws/aws-c-common/source/common.c
index 7b36210188..88c5d262c8 100644
--- a/contrib/restricted/aws/aws-c-common/source/common.c
+++ b/contrib/restricted/aws/aws-c-common/source/common.c
@@ -1,20 +1,20 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
-#include <aws/common/logging.h>
+#include <aws/common/logging.h>
#include <aws/common/math.h>
-#include <aws/common/private/dlloads.h>
+#include <aws/common/private/dlloads.h>
#include <stdarg.h>
#include <stdlib.h>
#ifdef _WIN32
# include <Windows.h>
-#else
-# include <dlfcn.h>
+#else
+# include <dlfcn.h>
#endif
#ifdef __MACH__
@@ -27,47 +27,47 @@
# pragma warning(disable : 4100)
#endif
-long (*g_set_mempolicy_ptr)(int, const unsigned long *, unsigned long) = NULL;
-void *g_libnuma_handle = NULL;
+long (*g_set_mempolicy_ptr)(int, const unsigned long *, unsigned long) = NULL;
+void *g_libnuma_handle = NULL;
-void aws_secure_zero(void *pBuf, size_t bufsize) {
-#if defined(_WIN32)
- SecureZeroMemory(pBuf, bufsize);
-#else
- /* We cannot use memset_s, even on a C11 compiler, because that would require
- * that __STDC_WANT_LIB_EXT1__ be defined before the _first_ inclusion of string.h.
- *
- * We'll try to work around this by using inline asm on GCC-like compilers,
- * and by exposing the buffer pointer in a volatile local pointer elsewhere.
+void aws_secure_zero(void *pBuf, size_t bufsize) {
+#if defined(_WIN32)
+ SecureZeroMemory(pBuf, bufsize);
+#else
+ /* We cannot use memset_s, even on a C11 compiler, because that would require
+ * that __STDC_WANT_LIB_EXT1__ be defined before the _first_ inclusion of string.h.
+ *
+ * We'll try to work around this by using inline asm on GCC-like compilers,
+ * and by exposing the buffer pointer in a volatile local pointer elsewhere.
*/
-# if defined(__GNUC__) || defined(__clang__)
- memset(pBuf, 0, bufsize);
- /* This inline asm serves to convince the compiler that the buffer is (somehow) still
- * used after the zero, and therefore that the optimizer can't eliminate the memset.
- */
- __asm__ __volatile__("" /* The asm doesn't actually do anything. */
- : /* no outputs */
- /* Tell the compiler that the asm code has access to the pointer to the buffer,
- * and therefore it might be reading the (now-zeroed) buffer.
-                          * Without this, clang/LLVM 9.0.0 optimizes away a memset of a stack buffer.
- */
- : "r"(pBuf)
- /* Also clobber memory. While this seems like it might be unnecessary - after all,
- * it's enough that the asm might read the buffer, right? - in practice GCC 7.3.0
- * seems to optimize a zero of a stack buffer without it.
- */
- : "memory");
-# else // not GCC/clang
- /* We don't have access to inline asm, since we're on a non-GCC platform. Move the pointer
- * through a volatile pointer in an attempt to confuse the optimizer.
- */
- volatile void *pVolBuf = pBuf;
- memset(pVolBuf, 0, bufsize);
-# endif // #else not GCC/clang
-#endif // #else not windows
+# if defined(__GNUC__) || defined(__clang__)
+ memset(pBuf, 0, bufsize);
+ /* This inline asm serves to convince the compiler that the buffer is (somehow) still
+ * used after the zero, and therefore that the optimizer can't eliminate the memset.
+ */
+ __asm__ __volatile__("" /* The asm doesn't actually do anything. */
+ : /* no outputs */
+ /* Tell the compiler that the asm code has access to the pointer to the buffer,
+ * and therefore it might be reading the (now-zeroed) buffer.
+                          * Without this, clang/LLVM 9.0.0 optimizes away a memset of a stack buffer.
+ */
+ : "r"(pBuf)
+ /* Also clobber memory. While this seems like it might be unnecessary - after all,
+ * it's enough that the asm might read the buffer, right? - in practice GCC 7.3.0
+ * seems to optimize a zero of a stack buffer without it.
+ */
+ : "memory");
+# else // not GCC/clang
+ /* We don't have access to inline asm, since we're on a non-GCC platform. Move the pointer
+ * through a volatile pointer in an attempt to confuse the optimizer.
+ */
+ volatile void *pVolBuf = pBuf;
+ memset(pVolBuf, 0, bufsize);
+# endif // #else not GCC/clang
+#endif // #else not windows
}
-#define AWS_DEFINE_ERROR_INFO_COMMON(C, ES) [(C)-0x0000] = AWS_DEFINE_ERROR_INFO(C, ES, "aws-c-common")
+#define AWS_DEFINE_ERROR_INFO_COMMON(C, ES) [(C)-0x0000] = AWS_DEFINE_ERROR_INFO(C, ES, "aws-c-common")
/* clang-format off */
static struct aws_error_info errors[] = {
AWS_DEFINE_ERROR_INFO_COMMON(
@@ -202,30 +202,30 @@ static struct aws_error_info errors[] = {
AWS_ERROR_ENVIRONMENT_UNSET,
"System call failure when unsetting an environment variable."
),
- AWS_DEFINE_ERROR_INFO_COMMON(
- AWS_ERROR_SYS_CALL_FAILURE,
- "System call failure"),
- AWS_DEFINE_ERROR_INFO_COMMON(
- AWS_ERROR_FILE_INVALID_PATH,
- "Invalid file path."),
- AWS_DEFINE_ERROR_INFO_COMMON(
- AWS_ERROR_MAX_FDS_EXCEEDED,
- "The maximum number of fds has been exceeded."),
- AWS_DEFINE_ERROR_INFO_COMMON(
- AWS_ERROR_NO_PERMISSION,
- "User does not have permission to perform the requested action."),
- AWS_DEFINE_ERROR_INFO_COMMON(
- AWS_ERROR_STREAM_UNSEEKABLE,
- "Stream does not support seek operations"),
- AWS_DEFINE_ERROR_INFO_COMMON(
- AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED,
- "A c-string like buffer was passed but a null terminator was not found within the bounds of the buffer."),
- AWS_DEFINE_ERROR_INFO_COMMON(
- AWS_ERROR_STRING_MATCH_NOT_FOUND,
- "The specified substring was not present in the input string."),
- AWS_DEFINE_ERROR_INFO_COMMON(
- AWS_ERROR_DIVIDE_BY_ZERO,
- "Attempt to divide a number by zero."),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_SYS_CALL_FAILURE,
+ "System call failure"),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_FILE_INVALID_PATH,
+ "Invalid file path."),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_MAX_FDS_EXCEEDED,
+ "The maximum number of fds has been exceeded."),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_NO_PERMISSION,
+ "User does not have permission to perform the requested action."),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_STREAM_UNSEEKABLE,
+ "Stream does not support seek operations"),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED,
+ "A c-string like buffer was passed but a null terminator was not found within the bounds of the buffer."),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_STRING_MATCH_NOT_FOUND,
+ "The specified substring was not present in the input string."),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_DIVIDE_BY_ZERO,
+ "Attempt to divide a number by zero."),
};
/* clang-format on */
@@ -234,77 +234,77 @@ static struct aws_error_info_list s_list = {
.count = AWS_ARRAY_SIZE(errors),
};
-static struct aws_log_subject_info s_common_log_subject_infos[] = {
- DEFINE_LOG_SUBJECT_INFO(
- AWS_LS_COMMON_GENERAL,
- "aws-c-common",
- "Subject for aws-c-common logging that doesn't belong to any particular category"),
- DEFINE_LOG_SUBJECT_INFO(
- AWS_LS_COMMON_TASK_SCHEDULER,
- "task-scheduler",
- "Subject for task scheduler or task specific logging."),
- DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_THREAD, "thread", "Subject for logging thread related functions."),
- DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_XML_PARSER, "xml-parser", "Subject for xml parser specific logging."),
- DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_MEMTRACE, "memtrace", "Output from the aws_mem_trace_dump function"),
-};
-
-static struct aws_log_subject_info_list s_common_log_subject_list = {
- .subject_list = s_common_log_subject_infos,
- .count = AWS_ARRAY_SIZE(s_common_log_subject_infos),
-};
-
-static bool s_common_library_initialized = false;
-
-void aws_common_library_init(struct aws_allocator *allocator) {
- (void)allocator;
-
- if (!s_common_library_initialized) {
- s_common_library_initialized = true;
+static struct aws_log_subject_info s_common_log_subject_infos[] = {
+ DEFINE_LOG_SUBJECT_INFO(
+ AWS_LS_COMMON_GENERAL,
+ "aws-c-common",
+ "Subject for aws-c-common logging that doesn't belong to any particular category"),
+ DEFINE_LOG_SUBJECT_INFO(
+ AWS_LS_COMMON_TASK_SCHEDULER,
+ "task-scheduler",
+ "Subject for task scheduler or task specific logging."),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_THREAD, "thread", "Subject for logging thread related functions."),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_XML_PARSER, "xml-parser", "Subject for xml parser specific logging."),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_MEMTRACE, "memtrace", "Output from the aws_mem_trace_dump function"),
+};
+
+static struct aws_log_subject_info_list s_common_log_subject_list = {
+ .subject_list = s_common_log_subject_infos,
+ .count = AWS_ARRAY_SIZE(s_common_log_subject_infos),
+};
+
+static bool s_common_library_initialized = false;
+
+void aws_common_library_init(struct aws_allocator *allocator) {
+ (void)allocator;
+
+ if (!s_common_library_initialized) {
+ s_common_library_initialized = true;
aws_register_error_info(&s_list);
- aws_register_log_subject_info_list(&s_common_log_subject_list);
-
-/* NUMA is funky and we can't rely on libnuma.so being available. We also don't want to take a hard dependency on it,
- * so try to load it if we can. */
-#if !defined(_WIN32) && !defined(WIN32)
- g_libnuma_handle = dlopen("libnuma.so", RTLD_NOW);
-
- if (g_libnuma_handle) {
- AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: libnuma.so loaded");
- *(void **)(&g_set_mempolicy_ptr) = dlsym(g_libnuma_handle, "set_mempolicy");
- if (g_set_mempolicy_ptr) {
- AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: set_mempolicy() loaded");
- } else {
- AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: set_mempolicy() failed to load");
- }
- } else {
- AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: libnuma.so failed to load");
- }
-#endif
+ aws_register_log_subject_info_list(&s_common_log_subject_list);
+
+/* NUMA is funky and we can't rely on libnuma.so being available. We also don't want to take a hard dependency on it,
+ * so try to load it if we can. */
+#if !defined(_WIN32) && !defined(WIN32)
+ g_libnuma_handle = dlopen("libnuma.so", RTLD_NOW);
+
+ if (g_libnuma_handle) {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: libnuma.so loaded");
+ *(void **)(&g_set_mempolicy_ptr) = dlsym(g_libnuma_handle, "set_mempolicy");
+ if (g_set_mempolicy_ptr) {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: set_mempolicy() loaded");
+ } else {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: set_mempolicy() failed to load");
+ }
+ } else {
+ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: libnuma.so failed to load");
+ }
+#endif
}
}
-void aws_common_library_clean_up(void) {
- if (s_common_library_initialized) {
- s_common_library_initialized = false;
- aws_unregister_error_info(&s_list);
- aws_unregister_log_subject_info_list(&s_common_log_subject_list);
-#if !defined(_WIN32) && !defined(WIN32)
- if (g_libnuma_handle) {
- dlclose(g_libnuma_handle);
- }
-#endif
- }
+void aws_common_library_clean_up(void) {
+ if (s_common_library_initialized) {
+ s_common_library_initialized = false;
+ aws_unregister_error_info(&s_list);
+ aws_unregister_log_subject_info_list(&s_common_log_subject_list);
+#if !defined(_WIN32) && !defined(WIN32)
+ if (g_libnuma_handle) {
+ dlclose(g_libnuma_handle);
+ }
+#endif
+ }
+}
+
+void aws_common_fatal_assert_library_initialized(void) {
+ if (!s_common_library_initialized) {
+ fprintf(
+ stderr, "%s", "aws_common_library_init() must be called before using any functionality in aws-c-common.");
+
+ AWS_FATAL_ASSERT(s_common_library_initialized);
+ }
}
-void aws_common_fatal_assert_library_initialized(void) {
- if (!s_common_library_initialized) {
- fprintf(
- stderr, "%s", "aws_common_library_init() must be called before using any functionality in aws-c-common.");
-
- AWS_FATAL_ASSERT(s_common_library_initialized);
- }
-}
-
#ifdef _MSC_VER
# pragma warning(pop)
#endif
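
The interesting part of this file is aws_secure_zero(): on GCC/Clang it follows the memset with an empty inline-asm statement that takes the buffer pointer as an input and clobbers memory, so the optimizer cannot prove the zeroing is a dead store and delete it. A reduced sketch of just that barrier (GCC/Clang only; an illustration, not the library implementation):

#include <string.h>

static void wipe(void *buf, size_t len) {
    memset(buf, 0, len);
#if defined(__GNUC__) || defined(__clang__)
    /* Pretend the asm might read *buf, so the compiler cannot treat the
     * memset above as a removable dead store. */
    __asm__ __volatile__("" : : "r"(buf) : "memory");
#endif
}

int main(void) {
    char secret[32] = "hunter2";
    wipe(secret, sizeof(secret)); /* survives -O2, unlike a bare trailing memset */
    return 0;
}
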
diff --git a/contrib/restricted/aws/aws-c-common/source/condition_variable.c b/contrib/restricted/aws/aws-c-common/source/condition_variable.c
index 00f6db525d..6d67dbbeaa 100644
--- a/contrib/restricted/aws/aws-c-common/source/condition_variable.c
+++ b/contrib/restricted/aws/aws-c-common/source/condition_variable.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/condition_variable.h>
diff --git a/contrib/restricted/aws/aws-c-common/source/date_time.c b/contrib/restricted/aws/aws-c-common/source/date_time.c
index a3f81caf22..8d08e57ad8 100644
--- a/contrib/restricted/aws/aws-c-common/source/date_time.c
+++ b/contrib/restricted/aws/aws-c-common/source/date_time.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/date_time.h>
@@ -215,7 +215,7 @@ static int s_parse_iso_8601_basic(const struct aws_byte_cursor *date_str_cursor,
size_t sub_index = index - state_start_index;
switch (state) {
case ON_YEAR:
- if (aws_isdigit(c)) {
+ if (aws_isdigit(c)) {
parsed_time->tm_year = parsed_time->tm_year * 10 + (c - '0');
if (sub_index == 3) {
state = ON_MONTH;
@@ -228,7 +228,7 @@ static int s_parse_iso_8601_basic(const struct aws_byte_cursor *date_str_cursor,
break;
case ON_MONTH:
- if (aws_isdigit(c)) {
+ if (aws_isdigit(c)) {
parsed_time->tm_mon = parsed_time->tm_mon * 10 + (c - '0');
if (sub_index == 1) {
state = ON_MONTH_DAY;
@@ -244,7 +244,7 @@ static int s_parse_iso_8601_basic(const struct aws_byte_cursor *date_str_cursor,
if (c == 'T' && sub_index == 2) {
state = ON_HOUR;
state_start_index = index + 1;
- } else if (aws_isdigit(c)) {
+ } else if (aws_isdigit(c)) {
parsed_time->tm_mday = parsed_time->tm_mday * 10 + (c - '0');
} else {
error = true;
@@ -252,7 +252,7 @@ static int s_parse_iso_8601_basic(const struct aws_byte_cursor *date_str_cursor,
break;
case ON_HOUR:
- if (aws_isdigit(c)) {
+ if (aws_isdigit(c)) {
parsed_time->tm_hour = parsed_time->tm_hour * 10 + (c - '0');
if (sub_index == 1) {
state = ON_MINUTE;
@@ -264,7 +264,7 @@ static int s_parse_iso_8601_basic(const struct aws_byte_cursor *date_str_cursor,
break;
case ON_MINUTE:
- if (aws_isdigit(c)) {
+ if (aws_isdigit(c)) {
parsed_time->tm_min = parsed_time->tm_min * 10 + (c - '0');
if (sub_index == 1) {
state = ON_SECOND;
@@ -276,7 +276,7 @@ static int s_parse_iso_8601_basic(const struct aws_byte_cursor *date_str_cursor,
break;
case ON_SECOND:
- if (aws_isdigit(c)) {
+ if (aws_isdigit(c)) {
parsed_time->tm_sec = parsed_time->tm_sec * 10 + (c - '0');
if (sub_index == 1) {
state = ON_TZ;
@@ -290,7 +290,7 @@ static int s_parse_iso_8601_basic(const struct aws_byte_cursor *date_str_cursor,
case ON_TZ:
if (c == 'Z' && (sub_index == 0 || sub_index == 3)) {
state = FINISHED;
- } else if (!aws_isdigit(c) || sub_index > 3) {
+ } else if (!aws_isdigit(c) || sub_index > 3) {
error = true;
}
break;
@@ -324,7 +324,7 @@ static int s_parse_iso_8601(const struct aws_byte_cursor *date_str_cursor, struc
state = ON_MONTH;
state_start_index = index + 1;
parsed_time->tm_year -= 1900;
- } else if (aws_isdigit(c)) {
+ } else if (aws_isdigit(c)) {
parsed_time->tm_year = parsed_time->tm_year * 10 + (c - '0');
} else {
error = true;
@@ -335,7 +335,7 @@ static int s_parse_iso_8601(const struct aws_byte_cursor *date_str_cursor, struc
state = ON_MONTH_DAY;
state_start_index = index + 1;
parsed_time->tm_mon -= 1;
- } else if (aws_isdigit(c)) {
+ } else if (aws_isdigit(c)) {
parsed_time->tm_mon = parsed_time->tm_mon * 10 + (c - '0');
} else {
error = true;
@@ -346,7 +346,7 @@ static int s_parse_iso_8601(const struct aws_byte_cursor *date_str_cursor, struc
if (c == 'T' && index - state_start_index == 2) {
state = ON_HOUR;
state_start_index = index + 1;
- } else if (aws_isdigit(c)) {
+ } else if (aws_isdigit(c)) {
parsed_time->tm_mday = parsed_time->tm_mday * 10 + (c - '0');
} else {
error = true;
@@ -358,13 +358,13 @@ static int s_parse_iso_8601(const struct aws_byte_cursor *date_str_cursor, struc
if (index - state_start_index == 2) {
state = ON_MINUTE;
state_start_index = index + 1;
- if (aws_isdigit(c)) {
+ if (aws_isdigit(c)) {
state_start_index = index;
advance = false;
} else if (c != ':') {
error = true;
}
- } else if (aws_isdigit(c)) {
+ } else if (aws_isdigit(c)) {
parsed_time->tm_hour = parsed_time->tm_hour * 10 + (c - '0');
} else {
error = true;
@@ -376,13 +376,13 @@ static int s_parse_iso_8601(const struct aws_byte_cursor *date_str_cursor, struc
if (index - state_start_index == 2) {
state = ON_SECOND;
state_start_index = index + 1;
- if (aws_isdigit(c)) {
+ if (aws_isdigit(c)) {
state_start_index = index;
advance = false;
} else if (c != ':') {
error = true;
}
- } else if (aws_isdigit(c)) {
+ } else if (aws_isdigit(c)) {
parsed_time->tm_min = parsed_time->tm_min * 10 + (c - '0');
} else {
error = true;
@@ -396,7 +396,7 @@ static int s_parse_iso_8601(const struct aws_byte_cursor *date_str_cursor, struc
} else if (c == '.' && index - state_start_index == 2) {
state = ON_TZ;
state_start_index = index + 1;
- } else if (aws_isdigit(c)) {
+ } else if (aws_isdigit(c)) {
parsed_time->tm_sec = parsed_time->tm_sec * 10 + (c - '0');
} else {
error = true;
@@ -407,7 +407,7 @@ static int s_parse_iso_8601(const struct aws_byte_cursor *date_str_cursor, struc
if (c == 'Z') {
state = FINISHED;
state_start_index = index + 1;
- } else if (!aws_isdigit(c)) {
+ } else if (!aws_isdigit(c)) {
error = true;
}
break;
@@ -449,14 +449,14 @@ static int s_parse_rfc_822(
if (c == ',') {
state = ON_SPACE_DELIM;
state_start_index = index + 1;
- } else if (aws_isdigit(c)) {
+ } else if (aws_isdigit(c)) {
state = ON_MONTH_DAY;
- } else if (!aws_isalpha(c)) {
+ } else if (!aws_isalpha(c)) {
error = true;
}
break;
case ON_SPACE_DELIM:
- if (aws_isspace(c)) {
+ if (aws_isspace(c)) {
state = ON_MONTH_DAY;
state_start_index = index + 1;
} else {
@@ -464,9 +464,9 @@ static int s_parse_rfc_822(
}
break;
case ON_MONTH_DAY:
- if (aws_isdigit(c)) {
+ if (aws_isdigit(c)) {
parsed_time->tm_mday = parsed_time->tm_mday * 10 + (c - '0');
- } else if (aws_isspace(c)) {
+ } else if (aws_isspace(c)) {
state = ON_MONTH;
state_start_index = index + 1;
} else {
@@ -474,7 +474,7 @@ static int s_parse_rfc_822(
}
break;
case ON_MONTH:
- if (aws_isspace(c)) {
+ if (aws_isspace(c)) {
int monthNumber =
get_month_number_from_str((const char *)date_str_cursor->ptr, state_start_index, index + 1);
@@ -485,21 +485,21 @@ static int s_parse_rfc_822(
} else {
error = true;
}
- } else if (!aws_isalpha(c)) {
+ } else if (!aws_isalpha(c)) {
error = true;
}
break;
/* year can be 4 or 2 digits. */
case ON_YEAR:
- if (aws_isspace(c) && index - state_start_index == 4) {
+ if (aws_isspace(c) && index - state_start_index == 4) {
state = ON_HOUR;
state_start_index = index + 1;
parsed_time->tm_year -= 1900;
- } else if (aws_isspace(c) && index - state_start_index == 2) {
+ } else if (aws_isspace(c) && index - state_start_index == 2) {
state = 5;
state_start_index = index + 1;
parsed_time->tm_year += 2000 - 1900;
- } else if (aws_isdigit(c)) {
+ } else if (aws_isdigit(c)) {
parsed_time->tm_year = parsed_time->tm_year * 10 + (c - '0');
} else {
error = true;
@@ -509,7 +509,7 @@ static int s_parse_rfc_822(
if (c == ':' && index - state_start_index == 2) {
state = ON_MINUTE;
state_start_index = index + 1;
- } else if (aws_isdigit(c)) {
+ } else if (aws_isdigit(c)) {
parsed_time->tm_hour = parsed_time->tm_hour * 10 + (c - '0');
} else {
error = true;
@@ -519,24 +519,24 @@ static int s_parse_rfc_822(
if (c == ':' && index - state_start_index == 2) {
state = ON_SECOND;
state_start_index = index + 1;
- } else if (aws_isdigit(c)) {
+ } else if (aws_isdigit(c)) {
parsed_time->tm_min = parsed_time->tm_min * 10 + (c - '0');
} else {
error = true;
}
break;
case ON_SECOND:
- if (aws_isspace(c) && index - state_start_index == 2) {
+ if (aws_isspace(c) && index - state_start_index == 2) {
state = ON_TZ;
state_start_index = index + 1;
- } else if (aws_isdigit(c)) {
+ } else if (aws_isdigit(c)) {
parsed_time->tm_sec = parsed_time->tm_sec * 10 + (c - '0');
} else {
error = true;
}
break;
case ON_TZ:
- if ((aws_isalnum(c) || c == '-' || c == '+') && (index - state_start_index) < 5) {
+ if ((aws_isalnum(c) || c == '-' || c == '+') && (index - state_start_index) < 5) {
dt->tz[index - state_start_index] = c;
} else {
error = true;
@@ -566,7 +566,7 @@ int aws_date_time_init_from_str_cursor(
struct aws_date_time *dt,
const struct aws_byte_cursor *date_str_cursor,
enum aws_date_format fmt) {
- AWS_ERROR_PRECONDITION(date_str_cursor->len <= AWS_DATE_TIME_STR_MAX_LEN, AWS_ERROR_OVERFLOW_DETECTED);
+ AWS_ERROR_PRECONDITION(date_str_cursor->len <= AWS_DATE_TIME_STR_MAX_LEN, AWS_ERROR_OVERFLOW_DETECTED);
AWS_ZERO_STRUCT(*dt);
@@ -639,7 +639,7 @@ int aws_date_time_init_from_str(
struct aws_date_time *dt,
const struct aws_byte_buf *date_str,
enum aws_date_format fmt) {
- AWS_ERROR_PRECONDITION(date_str->len <= AWS_DATE_TIME_STR_MAX_LEN, AWS_ERROR_OVERFLOW_DETECTED);
+ AWS_ERROR_PRECONDITION(date_str->len <= AWS_DATE_TIME_STR_MAX_LEN, AWS_ERROR_OVERFLOW_DETECTED);
struct aws_byte_cursor date_cursor = aws_byte_cursor_from_buf(date_str);
return aws_date_time_init_from_str_cursor(dt, &date_cursor, fmt);
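
The date parsers above are hand-rolled state machines: they walk the input one byte at a time, accumulate digits with value = value * 10 + (c - '0'), and switch states on delimiters, using the locale-independent aws_isdigit()/aws_isspace() helpers instead of ctype.h. A much-reduced sketch of the same style, parsing only "HH:MM" (illustrative, not library code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool parse_hh_mm(const char *s, int *hour, int *minute) {
    enum { ON_HOUR, ON_MINUTE } state = ON_HOUR;
    int h = 0;
    int m = 0;
    size_t start = 0;
    for (size_t i = 0; i < strlen(s); ++i) {
        char c = s[i];
        switch (state) {
            case ON_HOUR:
                if (c == ':' && i - start == 2) {
                    state = ON_MINUTE; /* delimiter: advance the state machine */
                    start = i + 1;
                } else if (c >= '0' && c <= '9') {
                    h = h * 10 + (c - '0'); /* accumulate-digit pattern */
                } else {
                    return false;
                }
                break;
            case ON_MINUTE:
                if (c >= '0' && c <= '9') {
                    m = m * 10 + (c - '0');
                } else {
                    return false;
                }
                break;
        }
    }
    if (state != ON_MINUTE || h > 23 || m > 59) {
        return false;
    }
    *hour = h;
    *minute = m;
    return true;
}

int main(void) {
    int h = 0;
    int m = 0;
    if (parse_hh_mm("09:45", &h, &m)) {
        printf("%d:%d\n", h, m); /* prints 9:45 */
    }
    return 0;
}
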
diff --git a/contrib/restricted/aws/aws-c-common/source/device_random.c b/contrib/restricted/aws/aws-c-common/source/device_random.c
index 7531c666fb..3df8a218e7 100644
--- a/contrib/restricted/aws/aws-c-common/source/device_random.c
+++ b/contrib/restricted/aws/aws-c-common/source/device_random.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/device_random.h>
diff --git a/contrib/restricted/aws/aws-c-common/source/encoding.c b/contrib/restricted/aws/aws-c-common/source/encoding.c
index e3ac587c04..26a41fa163 100644
--- a/contrib/restricted/aws/aws-c-common/source/encoding.c
+++ b/contrib/restricted/aws/aws-c-common/source/encoding.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/encoding.h>
@@ -269,39 +269,39 @@ int aws_base64_encode(const struct aws_byte_cursor *AWS_RESTRICT to_encode, stru
AWS_ASSERT(to_encode->ptr);
AWS_ASSERT(output->buffer);
- size_t terminated_length = 0;
+ size_t terminated_length = 0;
size_t encoded_length = 0;
- if (AWS_UNLIKELY(aws_base64_compute_encoded_len(to_encode->len, &terminated_length))) {
+ if (AWS_UNLIKELY(aws_base64_compute_encoded_len(to_encode->len, &terminated_length))) {
return AWS_OP_ERR;
}
- size_t needed_capacity = 0;
- if (AWS_UNLIKELY(aws_add_size_checked(output->len, terminated_length, &needed_capacity))) {
- return AWS_OP_ERR;
- }
-
- if (AWS_UNLIKELY(output->capacity < needed_capacity)) {
+ size_t needed_capacity = 0;
+ if (AWS_UNLIKELY(aws_add_size_checked(output->len, terminated_length, &needed_capacity))) {
+ return AWS_OP_ERR;
+ }
+
+ if (AWS_UNLIKELY(output->capacity < needed_capacity)) {
return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
}
- /*
- * For convenience to standard C functions expecting a null-terminated
- * string, the output is terminated. As the encoding itself can be used in
- * various ways, however, its length should never account for that byte.
- */
- encoded_length = (terminated_length - 1);
-
+ /*
+ * For convenience to standard C functions expecting a null-terminated
+ * string, the output is terminated. As the encoding itself can be used in
+ * various ways, however, its length should never account for that byte.
+ */
+ encoded_length = (terminated_length - 1);
+
if (aws_common_private_has_avx2()) {
- aws_common_private_base64_encode_sse41(to_encode->ptr, output->buffer + output->len, to_encode->len);
- output->buffer[output->len + encoded_length] = 0;
- output->len += encoded_length;
+ aws_common_private_base64_encode_sse41(to_encode->ptr, output->buffer + output->len, to_encode->len);
+ output->buffer[output->len + encoded_length] = 0;
+ output->len += encoded_length;
return AWS_OP_SUCCESS;
}
size_t buffer_length = to_encode->len;
size_t block_count = (buffer_length + 2) / 3;
size_t remainder_count = (buffer_length % 3);
- size_t str_index = output->len;
+ size_t str_index = output->len;
for (size_t i = 0; i < to_encode->len; i += 3) {
uint32_t block = to_encode->ptr[i];
@@ -323,17 +323,17 @@ int aws_base64_encode(const struct aws_byte_cursor *AWS_RESTRICT to_encode, stru
}
if (remainder_count > 0) {
- output->buffer[output->len + block_count * 4 - 1] = '=';
+ output->buffer[output->len + block_count * 4 - 1] = '=';
if (remainder_count == 1) {
- output->buffer[output->len + block_count * 4 - 2] = '=';
+ output->buffer[output->len + block_count * 4 - 2] = '=';
}
}
     /* it's a string, add the null terminator. */
- output->buffer[output->len + encoded_length] = 0;
+ output->buffer[output->len + encoded_length] = 0;
+
+ output->len += encoded_length;
- output->len += encoded_length;
-
return AWS_OP_SUCCESS;
}
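
The encoder hunk above leans on one piece of size arithmetic: every group of 3 input bytes (rounded up) becomes 4 output characters, plus one byte for the convenience null terminator that is written but never counted in output->len. A sketch of that computation, with the overflow checking that the real aws_base64_compute_encoded_len() performs omitted for brevity:

#include <stddef.h>
#include <stdio.h>

static size_t base64_terminated_len(size_t to_encode_len) {
    size_t blocks = (to_encode_len + 2) / 3; /* ceil(n / 3) */
    return blocks * 4 + 1;                   /* 4 chars per block + '\0' */
}

int main(void) {
    /* 4 input bytes -> 2 blocks -> 8 characters, 9 with the terminator */
    printf("%zu\n", base64_terminated_len(4));
    return 0;
}
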
diff --git a/contrib/restricted/aws/aws-c-common/source/error.c b/contrib/restricted/aws/aws-c-common/source/error.c
index 0631bdd9c6..60e6c9e799 100644
--- a/contrib/restricted/aws/aws-c-common/source/error.c
+++ b/contrib/restricted/aws/aws-c-common/source/error.c
@@ -1,13 +1,13 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/error.h>
#include <aws/common/common.h>
-#include <errno.h>
+#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
@@ -21,13 +21,13 @@ AWS_THREAD_LOCAL void *tl_thread_handler_context = NULL;
/* Since slot size is 00000100 00000000, to divide, we need to shift right by 10
* bits to find the slot, and to find the modulus, we use a binary and with
- * 00000011 11111111 to find the index in that slot.
- */
-#define SLOT_MASK (AWS_ERROR_ENUM_STRIDE - 1)
+ * 00000011 11111111 to find the index in that slot.
+ */
+#define SLOT_MASK (AWS_ERROR_ENUM_STRIDE - 1)
-static const int MAX_ERROR_CODE = AWS_ERROR_ENUM_STRIDE * AWS_PACKAGE_SLOTS;
+static const int MAX_ERROR_CODE = AWS_ERROR_ENUM_STRIDE * AWS_PACKAGE_SLOTS;
-static const struct aws_error_info_list *volatile ERROR_SLOTS[AWS_PACKAGE_SLOTS] = {0};
+static const struct aws_error_info_list *volatile ERROR_SLOTS[AWS_PACKAGE_SLOTS] = {0};
int aws_last_error(void) {
return tl_last_error;
@@ -38,8 +38,8 @@ static const struct aws_error_info *get_error_by_code(int err) {
return NULL;
}
- uint32_t slot_index = (uint32_t)err >> AWS_ERROR_ENUM_STRIDE_BITS;
- uint32_t error_index = (uint32_t)err & SLOT_MASK;
+ uint32_t slot_index = (uint32_t)err >> AWS_ERROR_ENUM_STRIDE_BITS;
+ uint32_t error_index = (uint32_t)err & SLOT_MASK;
const struct aws_error_info_list *error_slot = ERROR_SLOTS[slot_index];
@@ -130,75 +130,75 @@ void aws_register_error_info(const struct aws_error_info_list *error_info) {
* - we'll either segfault immediately (for the first two) or for the count
* assert, the registration will be ineffective.
*/
- AWS_FATAL_ASSERT(error_info);
- AWS_FATAL_ASSERT(error_info->error_list);
- AWS_FATAL_ASSERT(error_info->count);
-
- const int min_range = error_info->error_list[0].error_code;
- const int slot_index = min_range >> AWS_ERROR_ENUM_STRIDE_BITS;
-
- if (slot_index >= AWS_PACKAGE_SLOTS || slot_index < 0) {
- /* This is an NDEBUG build apparently. Kill the process rather than
- * corrupting heap. */
- fprintf(stderr, "Bad error slot index %d\n", slot_index);
- AWS_FATAL_ASSERT(false);
- }
-
-#if DEBUG_BUILD
- /* Assert that error info entries are in the right order. */
- for (int i = 1; i < error_info->count; ++i) {
- const int expected_code = min_range + i;
- const struct aws_error_info *info = &error_info->error_list[i];
- if (info->error_code != expected_code) {
- if (info->error_code) {
- fprintf(stderr, "Error %s is at wrong index of error info list.\n", info->literal_name);
- } else {
- fprintf(stderr, "Error %d is missing from error info list.\n", expected_code);
- }
- AWS_FATAL_ASSERT(0);
- }
- }
-#endif /* DEBUG_BUILD */
-
- ERROR_SLOTS[slot_index] = error_info;
-}
-
-void aws_unregister_error_info(const struct aws_error_info_list *error_info) {
- AWS_FATAL_ASSERT(error_info);
- AWS_FATAL_ASSERT(error_info->error_list);
- AWS_FATAL_ASSERT(error_info->count);
-
- const int min_range = error_info->error_list[0].error_code;
- const int slot_index = min_range >> AWS_ERROR_ENUM_STRIDE_BITS;
-
- if (slot_index >= AWS_PACKAGE_SLOTS || slot_index < 0) {
+ AWS_FATAL_ASSERT(error_info);
+ AWS_FATAL_ASSERT(error_info->error_list);
+ AWS_FATAL_ASSERT(error_info->count);
+
+ const int min_range = error_info->error_list[0].error_code;
+ const int slot_index = min_range >> AWS_ERROR_ENUM_STRIDE_BITS;
+
+ if (slot_index >= AWS_PACKAGE_SLOTS || slot_index < 0) {
+ /* This is an NDEBUG build apparently. Kill the process rather than
+ * corrupting heap. */
+ fprintf(stderr, "Bad error slot index %d\n", slot_index);
+ AWS_FATAL_ASSERT(false);
+ }
+
+#if DEBUG_BUILD
+ /* Assert that error info entries are in the right order. */
+ for (int i = 1; i < error_info->count; ++i) {
+ const int expected_code = min_range + i;
+ const struct aws_error_info *info = &error_info->error_list[i];
+ if (info->error_code != expected_code) {
+ if (info->error_code) {
+ fprintf(stderr, "Error %s is at wrong index of error info list.\n", info->literal_name);
+ } else {
+ fprintf(stderr, "Error %d is missing from error info list.\n", expected_code);
+ }
+ AWS_FATAL_ASSERT(0);
+ }
+ }
+#endif /* DEBUG_BUILD */
+
+ ERROR_SLOTS[slot_index] = error_info;
+}
+
+void aws_unregister_error_info(const struct aws_error_info_list *error_info) {
+ AWS_FATAL_ASSERT(error_info);
+ AWS_FATAL_ASSERT(error_info->error_list);
+ AWS_FATAL_ASSERT(error_info->count);
+
+ const int min_range = error_info->error_list[0].error_code;
+ const int slot_index = min_range >> AWS_ERROR_ENUM_STRIDE_BITS;
+
+ if (slot_index >= AWS_PACKAGE_SLOTS || slot_index < 0) {
/* This is an NDEBUG build apparently. Kill the process rather than
* corrupting heap. */
- fprintf(stderr, "Bad error slot index %d\n", slot_index);
- AWS_FATAL_ASSERT(0);
+ fprintf(stderr, "Bad error slot index %d\n", slot_index);
+ AWS_FATAL_ASSERT(0);
}
- ERROR_SLOTS[slot_index] = NULL;
+ ERROR_SLOTS[slot_index] = NULL;
+}
+
+int aws_translate_and_raise_io_error(int error_no) {
+ switch (error_no) {
+ case EINVAL:
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ case ESPIPE:
+ return aws_raise_error(AWS_ERROR_STREAM_UNSEEKABLE);
+ case EPERM:
+ case EACCES:
+ return aws_raise_error(AWS_ERROR_NO_PERMISSION);
+ case EISDIR:
+ case ENAMETOOLONG:
+ case ENOENT:
+ return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH);
+ case ENFILE:
+ return aws_raise_error(AWS_ERROR_MAX_FDS_EXCEEDED);
+ case ENOMEM:
+ return aws_raise_error(AWS_ERROR_OOM);
+ default:
+ return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
+ }
}
-
-int aws_translate_and_raise_io_error(int error_no) {
- switch (error_no) {
- case EINVAL:
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- case ESPIPE:
- return aws_raise_error(AWS_ERROR_STREAM_UNSEEKABLE);
- case EPERM:
- case EACCES:
- return aws_raise_error(AWS_ERROR_NO_PERMISSION);
- case EISDIR:
- case ENAMETOOLONG:
- case ENOENT:
- return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH);
- case ENFILE:
- return aws_raise_error(AWS_ERROR_MAX_FDS_EXCEEDED);
- case ENOMEM:
- return aws_raise_error(AWS_ERROR_OOM);
- default:
- return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
- }
-}
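
The error registry above is a two-level lookup: with an enum stride of 0x0400, the high bits of an error code select a package's slot in ERROR_SLOTS and the low 10 bits select the entry inside that package's list. A stand-alone sketch of that arithmetic; the constants are restated locally for illustration and are not the library's macros.

#include <stdio.h>

#define STRIDE_BITS 10u
#define STRIDE (1u << STRIDE_BITS) /* 0x0400 codes per package slot */
#define SLOT_MASK (STRIDE - 1u)    /* 0x03FF */

int main(void) {
    unsigned int err = 0x0C06; /* hypothetical code belonging to slot 3 */
    unsigned int slot_index = err >> STRIDE_BITS;
    unsigned int error_index = err & SLOT_MASK;
    printf("slot %u, index %u\n", slot_index, error_index); /* slot 3, index 6 */
    return 0;
}
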
diff --git a/contrib/restricted/aws/aws-c-common/source/fifo_cache.c b/contrib/restricted/aws/aws-c-common/source/fifo_cache.c
index a70a6c8a2a..9627c5ad63 100644
--- a/contrib/restricted/aws/aws-c-common/source/fifo_cache.c
+++ b/contrib/restricted/aws/aws-c-common/source/fifo_cache.c
@@ -1,59 +1,59 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-#include <aws/common/fifo_cache.h>
-
-static int s_fifo_cache_put(struct aws_cache *cache, const void *key, void *p_value);
-
-static struct aws_cache_vtable s_fifo_cache_vtable = {
- .destroy = aws_cache_base_default_destroy,
- .find = aws_cache_base_default_find,
- .put = s_fifo_cache_put,
- .remove = aws_cache_base_default_remove,
- .clear = aws_cache_base_default_clear,
- .get_element_count = aws_cache_base_default_get_element_count,
-};
-
-struct aws_cache *aws_cache_new_fifo(
- struct aws_allocator *allocator,
- aws_hash_fn *hash_fn,
- aws_hash_callback_eq_fn *equals_fn,
- aws_hash_callback_destroy_fn *destroy_key_fn,
- aws_hash_callback_destroy_fn *destroy_value_fn,
- size_t max_items) {
- AWS_ASSERT(allocator);
- AWS_ASSERT(max_items);
-
- struct aws_cache *fifo_cache = aws_mem_calloc(allocator, 1, sizeof(struct aws_cache));
- if (!fifo_cache) {
- return NULL;
- }
- fifo_cache->allocator = allocator;
- fifo_cache->max_items = max_items;
- fifo_cache->vtable = &s_fifo_cache_vtable;
- if (aws_linked_hash_table_init(
- &fifo_cache->table, allocator, hash_fn, equals_fn, destroy_key_fn, destroy_value_fn, max_items)) {
- return NULL;
- }
- return fifo_cache;
-}
-
-/* fifo cache put implementation */
-static int s_fifo_cache_put(struct aws_cache *cache, const void *key, void *p_value) {
- if (aws_linked_hash_table_put(&cache->table, key, p_value)) {
- return AWS_OP_ERR;
- }
-
- /* Manage the space if we actually added a new element and the cache is full. */
- if (aws_linked_hash_table_get_element_count(&cache->table) > cache->max_items) {
- /* we're over the cache size limit. Remove whatever is in the front of
- * the linked_hash_table, which is the oldest element */
- const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table);
- struct aws_linked_list_node *node = aws_linked_list_front(list);
- struct aws_linked_hash_table_node *table_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node);
- return aws_linked_hash_table_remove(&cache->table, table_node->key);
- }
-
- return AWS_OP_SUCCESS;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/fifo_cache.h>
+
+static int s_fifo_cache_put(struct aws_cache *cache, const void *key, void *p_value);
+
+static struct aws_cache_vtable s_fifo_cache_vtable = {
+ .destroy = aws_cache_base_default_destroy,
+ .find = aws_cache_base_default_find,
+ .put = s_fifo_cache_put,
+ .remove = aws_cache_base_default_remove,
+ .clear = aws_cache_base_default_clear,
+ .get_element_count = aws_cache_base_default_get_element_count,
+};
+
+struct aws_cache *aws_cache_new_fifo(
+ struct aws_allocator *allocator,
+ aws_hash_fn *hash_fn,
+ aws_hash_callback_eq_fn *equals_fn,
+ aws_hash_callback_destroy_fn *destroy_key_fn,
+ aws_hash_callback_destroy_fn *destroy_value_fn,
+ size_t max_items) {
+ AWS_ASSERT(allocator);
+ AWS_ASSERT(max_items);
+
+ struct aws_cache *fifo_cache = aws_mem_calloc(allocator, 1, sizeof(struct aws_cache));
+ if (!fifo_cache) {
+ return NULL;
+ }
+ fifo_cache->allocator = allocator;
+ fifo_cache->max_items = max_items;
+ fifo_cache->vtable = &s_fifo_cache_vtable;
+ if (aws_linked_hash_table_init(
+ &fifo_cache->table, allocator, hash_fn, equals_fn, destroy_key_fn, destroy_value_fn, max_items)) {
+ return NULL;
+ }
+ return fifo_cache;
+}
+
+/* fifo cache put implementation */
+static int s_fifo_cache_put(struct aws_cache *cache, const void *key, void *p_value) {
+ if (aws_linked_hash_table_put(&cache->table, key, p_value)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Manage the space if we actually added a new element and the cache is full. */
+ if (aws_linked_hash_table_get_element_count(&cache->table) > cache->max_items) {
+ /* we're over the cache size limit. Remove whatever is in the front of
+ * the linked_hash_table, which is the oldest element */
+ const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table);
+ struct aws_linked_list_node *node = aws_linked_list_front(list);
+ struct aws_linked_hash_table_node *table_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node);
+ return aws_linked_hash_table_remove(&cache->table, table_node->key);
+ }
+
+ return AWS_OP_SUCCESS;
+}
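
A short usage sketch for the FIFO cache above, driven through the generic aws_cache API from cache.c. It assumes the stock aws_default_allocator(), aws_hash_ptr() and aws_ptr_eq() helpers from aws-c-common and is meant as an illustration rather than canonical usage.

#include <aws/common/common.h>
#include <aws/common/fifo_cache.h>
#include <aws/common/hash_table.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    struct aws_allocator *alloc = aws_default_allocator();
    aws_common_library_init(alloc);

    /* capacity 2: adding a third key evicts the oldest one, FIFO order */
    struct aws_cache *cache = aws_cache_new_fifo(
        alloc, aws_hash_ptr, aws_ptr_eq, NULL /* destroy_key */, NULL /* destroy_value */, 2);

    aws_cache_put(cache, (void *)(uintptr_t)1, (void *)"one");
    aws_cache_put(cache, (void *)(uintptr_t)2, (void *)"two");
    aws_cache_put(cache, (void *)(uintptr_t)3, (void *)"three"); /* evicts key 1 */

    void *value = NULL;
    aws_cache_find(cache, (void *)(uintptr_t)1, &value);
    printf("key 1 %s\n", value ? "still cached" : "evicted"); /* prints "evicted" */

    aws_cache_destroy(cache);
    aws_common_library_clean_up();
    return 0;
}
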
diff --git a/contrib/restricted/aws/aws-c-common/source/hash_table.c b/contrib/restricted/aws/aws-c-common/source/hash_table.c
index a67043ba35..a8125a2df1 100644
--- a/contrib/restricted/aws/aws-c-common/source/hash_table.c
+++ b/contrib/restricted/aws/aws-c-common/source/hash_table.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
/* For more information on how the RH hash works and in particular how we do
@@ -19,7 +19,7 @@
/* Include lookup3.c so we can (potentially) inline it and make use of the mix()
* macro. */
-#include <aws/common/private/lookup3.inl>
+#include <aws/common/private/lookup3.inl>
static void s_suppress_unused_lookup3_func_warnings(void) {
/* We avoid making changes to lookup3 if we can avoid it, but since it has functions
@@ -31,56 +31,56 @@ static void s_suppress_unused_lookup3_func_warnings(void) {
(void)hashbig;
}
-/**
- * Calculate the hash for the given key.
- * Ensures a reasonable semantics for null keys.
- * Ensures that no object ever hashes to 0, which is the sentinel value for an empty hash element.
- */
+/**
+ * Calculate the hash for the given key.
+ * Ensures a reasonable semantics for null keys.
+ * Ensures that no object ever hashes to 0, which is the sentinel value for an empty hash element.
+ */
static uint64_t s_hash_for(struct hash_table_state *state, const void *key) {
- AWS_PRECONDITION(hash_table_state_is_valid(state));
+ AWS_PRECONDITION(hash_table_state_is_valid(state));
s_suppress_unused_lookup3_func_warnings();
- if (key == NULL) {
- /* The best answer */
- return 42;
- }
-
+ if (key == NULL) {
+ /* The best answer */
+ return 42;
+ }
+
uint64_t hash_code = state->hash_fn(key);
if (!hash_code) {
hash_code = 1;
}
- AWS_RETURN_WITH_POSTCONDITION(hash_code, hash_code != 0);
+ AWS_RETURN_WITH_POSTCONDITION(hash_code, hash_code != 0);
+}
+
+/**
+ * Check equality of two objects, with a reasonable semantics for null.
+ */
+static bool s_safe_eq_check(aws_hash_callback_eq_fn *equals_fn, const void *a, const void *b) {
+ /* Short circuit if the pointers are the same */
+ if (a == b) {
+ return true;
+ }
+ /* If one but not both are null, the objects are not equal */
+ if (a == NULL || b == NULL) {
+ return false;
+ }
+ /* If both are non-null, call the underlying equals fn */
+ return equals_fn(a, b);
+}
+
+/**
+ * Check equality of two hash keys, with a reasonable semantics for null keys.
+ */
+static bool s_hash_keys_eq(struct hash_table_state *state, const void *a, const void *b) {
+ AWS_PRECONDITION(hash_table_state_is_valid(state));
+ bool rval = s_safe_eq_check(state->equals_fn, a, b);
+ AWS_RETURN_WITH_POSTCONDITION(rval, hash_table_state_is_valid(state));
}
-/**
- * Check equality of two objects, with a reasonable semantics for null.
- */
-static bool s_safe_eq_check(aws_hash_callback_eq_fn *equals_fn, const void *a, const void *b) {
- /* Short circuit if the pointers are the same */
- if (a == b) {
- return true;
- }
- /* If one but not both are null, the objects are not equal */
- if (a == NULL || b == NULL) {
- return false;
- }
- /* If both are non-null, call the underlying equals fn */
- return equals_fn(a, b);
-}
-
-/**
- * Check equality of two hash keys, with a reasonable semantics for null keys.
- */
-static bool s_hash_keys_eq(struct hash_table_state *state, const void *a, const void *b) {
- AWS_PRECONDITION(hash_table_state_is_valid(state));
- bool rval = s_safe_eq_check(state->equals_fn, a, b);
- AWS_RETURN_WITH_POSTCONDITION(rval, hash_table_state_is_valid(state));
-}
-
static size_t s_index_for(struct hash_table_state *map, struct hash_table_entry *entry) {
AWS_PRECONDITION(hash_table_state_is_valid(map));
size_t index = entry - map->slots;
- AWS_RETURN_WITH_POSTCONDITION(index, index < map->size && hash_table_state_is_valid(map));
+ AWS_RETURN_WITH_POSTCONDITION(index, index < map->size && hash_table_state_is_valid(map));
}
#if 0
@@ -218,10 +218,10 @@ int aws_hash_table_init(
aws_hash_callback_eq_fn *equals_fn,
aws_hash_callback_destroy_fn *destroy_key_fn,
aws_hash_callback_destroy_fn *destroy_value_fn) {
- AWS_PRECONDITION(map != NULL);
- AWS_PRECONDITION(alloc != NULL);
- AWS_PRECONDITION(hash_fn != NULL);
- AWS_PRECONDITION(equals_fn != NULL);
+ AWS_PRECONDITION(map != NULL);
+ AWS_PRECONDITION(alloc != NULL);
+ AWS_PRECONDITION(hash_fn != NULL);
+ AWS_PRECONDITION(equals_fn != NULL);
struct hash_table_state template;
template.hash_fn = hash_fn;
@@ -242,15 +242,15 @@ int aws_hash_table_init(
return AWS_OP_ERR;
}
- AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
+ AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
}
void aws_hash_table_clean_up(struct aws_hash_table *map) {
- AWS_PRECONDITION(map != NULL);
- AWS_PRECONDITION(
- map->p_impl == NULL || aws_hash_table_is_valid(map),
- "Input aws_hash_table [map] must be valid or hash_table_state pointer [map->p_impl] must be NULL, in case "
- "aws_hash_table_clean_up was called twice.");
+ AWS_PRECONDITION(map != NULL);
+ AWS_PRECONDITION(
+ map->p_impl == NULL || aws_hash_table_is_valid(map),
+ "Input aws_hash_table [map] must be valid or hash_table_state pointer [map->p_impl] must be NULL, in case "
+ "aws_hash_table_clean_up was called twice.");
struct hash_table_state *state = map->p_impl;
/* Ensure that we're idempotent */
@@ -259,10 +259,10 @@ void aws_hash_table_clean_up(struct aws_hash_table *map) {
}
aws_hash_table_clear(map);
- aws_mem_release(map->p_impl->alloc, map->p_impl);
+ aws_mem_release(map->p_impl->alloc, map->p_impl);
map->p_impl = NULL;
- AWS_POSTCONDITION(map->p_impl == NULL);
+ AWS_POSTCONDITION(map->p_impl == NULL);
}
void aws_hash_table_swap(struct aws_hash_table *AWS_RESTRICT a, struct aws_hash_table *AWS_RESTRICT b) {
@@ -273,11 +273,11 @@ void aws_hash_table_swap(struct aws_hash_table *AWS_RESTRICT a, struct aws_hash_
}
void aws_hash_table_move(struct aws_hash_table *AWS_RESTRICT to, struct aws_hash_table *AWS_RESTRICT from) {
- AWS_PRECONDITION(to != NULL);
- AWS_PRECONDITION(from != NULL);
- AWS_PRECONDITION(to != from);
- AWS_PRECONDITION(aws_hash_table_is_valid(from));
-
+ AWS_PRECONDITION(to != NULL);
+ AWS_PRECONDITION(from != NULL);
+ AWS_PRECONDITION(to != from);
+ AWS_PRECONDITION(aws_hash_table_is_valid(from));
+
*to = *from;
AWS_ZERO_STRUCT(*from);
AWS_POSTCONDITION(aws_hash_table_is_valid(to));
@@ -317,7 +317,7 @@ static int inline s_find_entry(
return AWS_ERROR_HASHTBL_ITEM_NOT_FOUND;
}
- if (entry->hash_code == hash_code && s_hash_keys_eq(state, key, entry->element.key)) {
+ if (entry->hash_code == hash_code && s_hash_keys_eq(state, key, entry->element.key)) {
if (p_probe_idx) {
*p_probe_idx = 0;
}
@@ -341,38 +341,38 @@ static int s_find_entry1(
int rv;
struct hash_table_entry *entry;
- /* This loop is guaranteed to terminate because entry_probe is bounded above by state->mask (i.e. state->size - 1).
- * Since probe_idx increments every loop iteration, it will become larger than entry_probe after at most state->size
- * transitions and the loop will exit (if it hasn't already)
- */
- while (1) {
-#ifdef CBMC
-# pragma CPROVER check push
-# pragma CPROVER check disable "unsigned-overflow"
-#endif
+ /* This loop is guaranteed to terminate because entry_probe is bounded above by state->mask (i.e. state->size - 1).
+ * Since probe_idx increments every loop iteration, it will become larger than entry_probe after at most state->size
+ * transitions and the loop will exit (if it hasn't already)
+ */
+ while (1) {
+#ifdef CBMC
+# pragma CPROVER check push
+# pragma CPROVER check disable "unsigned-overflow"
+#endif
uint64_t index = (hash_code + probe_idx) & state->mask;
-#ifdef CBMC
-# pragma CPROVER check pop
-#endif
+#ifdef CBMC
+# pragma CPROVER check pop
+#endif
entry = &state->slots[index];
if (!entry->hash_code) {
rv = AWS_ERROR_HASHTBL_ITEM_NOT_FOUND;
break;
}
- if (entry->hash_code == hash_code && s_hash_keys_eq(state, key, entry->element.key)) {
+ if (entry->hash_code == hash_code && s_hash_keys_eq(state, key, entry->element.key)) {
rv = AWS_ERROR_SUCCESS;
break;
}
-#ifdef CBMC
-# pragma CPROVER check push
-# pragma CPROVER check disable "unsigned-overflow"
-#endif
+#ifdef CBMC
+# pragma CPROVER check push
+# pragma CPROVER check disable "unsigned-overflow"
+#endif
uint64_t entry_probe = (index - entry->hash_code) & state->mask;
-#ifdef CBMC
-# pragma CPROVER check pop
-#endif
+#ifdef CBMC
+# pragma CPROVER check pop
+#endif
if (entry_probe < probe_idx) {
/* We now know that our target entry cannot exist; if it did exist,
@@ -385,7 +385,7 @@ static int s_find_entry1(
}
probe_idx++;
- }
+ }
*p_entry = entry;
if (p_probe_idx) {
@@ -396,9 +396,9 @@ static int s_find_entry1(
}
int aws_hash_table_find(const struct aws_hash_table *map, const void *key, struct aws_hash_element **p_elem) {
- AWS_PRECONDITION(aws_hash_table_is_valid(map));
- AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(p_elem), "Input aws_hash_element pointer [p_elem] must be writable.");
-
+ AWS_PRECONDITION(aws_hash_table_is_valid(map));
+ AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(p_elem), "Input aws_hash_element pointer [p_elem] must be writable.");
+
struct hash_table_state *state = map->p_impl;
uint64_t hash_code = s_hash_for(state, key);
struct hash_table_entry *entry;
@@ -410,54 +410,54 @@ int aws_hash_table_find(const struct aws_hash_table *map, const void *key, struc
} else {
*p_elem = NULL;
}
- AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
+ AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
}
-/**
- * Attempts to find a home for the given entry.
- * If the entry was empty (i.e. hash-code of 0), then the function does nothing and returns NULL
- * Otherwise, it emplaces the item, and returns a pointer to the newly emplaced entry.
- * This function is only called after the hash-table has been expanded to fit the new element,
- * so it should never fail.
+/**
+ * Attempts to find a home for the given entry.
+ * If the entry was empty (i.e. hash-code of 0), then the function does nothing and returns NULL
+ * Otherwise, it emplaces the item, and returns a pointer to the newly emplaced entry.
+ * This function is only called after the hash-table has been expanded to fit the new element,
+ * so it should never fail.
*/
static struct hash_table_entry *s_emplace_item(
struct hash_table_state *state,
struct hash_table_entry entry,
size_t probe_idx) {
- AWS_PRECONDITION(hash_table_state_is_valid(state));
-
- if (entry.hash_code == 0) {
- AWS_RETURN_WITH_POSTCONDITION(NULL, hash_table_state_is_valid(state));
- }
-
- struct hash_table_entry *rval = NULL;
-
- /* Since a valid hash_table has at least one empty element, this loop will always terminate in at most linear time
- */
- while (entry.hash_code != 0) {
-#ifdef CBMC
-# pragma CPROVER check push
-# pragma CPROVER check disable "unsigned-overflow"
-#endif
+ AWS_PRECONDITION(hash_table_state_is_valid(state));
+
+ if (entry.hash_code == 0) {
+ AWS_RETURN_WITH_POSTCONDITION(NULL, hash_table_state_is_valid(state));
+ }
+
+ struct hash_table_entry *rval = NULL;
+
+ /* Since a valid hash_table has at least one empty element, this loop will always terminate in at most linear time
+ */
+ while (entry.hash_code != 0) {
+#ifdef CBMC
+# pragma CPROVER check push
+# pragma CPROVER check disable "unsigned-overflow"
+#endif
size_t index = (size_t)(entry.hash_code + probe_idx) & state->mask;
-#ifdef CBMC
-# pragma CPROVER check pop
-#endif
+#ifdef CBMC
+# pragma CPROVER check pop
+#endif
struct hash_table_entry *victim = &state->slots[index];
-#ifdef CBMC
-# pragma CPROVER check push
-# pragma CPROVER check disable "unsigned-overflow"
-#endif
+#ifdef CBMC
+# pragma CPROVER check push
+# pragma CPROVER check disable "unsigned-overflow"
+#endif
size_t victim_probe_idx = (size_t)(index - victim->hash_code) & state->mask;
-#ifdef CBMC
-# pragma CPROVER check pop
-#endif
+#ifdef CBMC
+# pragma CPROVER check pop
+#endif
if (!victim->hash_code || victim_probe_idx < probe_idx) {
- /* The first thing we emplace is the entry itself. A pointer to its location becomes the rval */
- if (!rval) {
- rval = victim;
+ /* The first thing we emplace is the entry itself. A pointer to its location becomes the rval */
+ if (!rval) {
+ rval = victim;
}
struct hash_table_entry tmp = *victim;
@@ -470,25 +470,25 @@ static struct hash_table_entry *s_emplace_item(
}
}
- AWS_RETURN_WITH_POSTCONDITION(
- rval,
- hash_table_state_is_valid(state) && rval >= &state->slots[0] && rval < &state->slots[state->size],
- "Output hash_table_entry pointer [rval] must point in the slots of [state].");
+ AWS_RETURN_WITH_POSTCONDITION(
+ rval,
+ hash_table_state_is_valid(state) && rval >= &state->slots[0] && rval < &state->slots[state->size],
+ "Output hash_table_entry pointer [rval] must point in the slots of [state].");
}
static int s_expand_table(struct aws_hash_table *map) {
struct hash_table_state *old_state = map->p_impl;
struct hash_table_state template = *old_state;
- size_t new_size;
- if (aws_mul_size_checked(template.size, 2, &new_size)) {
- return AWS_OP_ERR;
- }
+ size_t new_size;
+ if (aws_mul_size_checked(template.size, 2, &new_size)) {
+ return AWS_OP_ERR;
+ }
+
+ if (s_update_template_size(&template, new_size)) {
+ return AWS_OP_ERR;
+ }
- if (s_update_template_size(&template, new_size)) {
- return AWS_OP_ERR;
- }
-
struct hash_table_state *new_state = s_alloc_state(&template);
if (!new_state) {
return AWS_OP_ERR;
@@ -614,9 +614,9 @@ int aws_hash_table_put(struct aws_hash_table *map, const void *key, void *value,
static size_t s_remove_entry(struct hash_table_state *state, struct hash_table_entry *entry) {
AWS_PRECONDITION(hash_table_state_is_valid(state));
AWS_PRECONDITION(state->entry_count > 0);
- AWS_PRECONDITION(
- entry >= &state->slots[0] && entry < &state->slots[state->size],
- "Input hash_table_entry [entry] pointer must point in the available slots.");
+ AWS_PRECONDITION(
+ entry >= &state->slots[0] && entry < &state->slots[state->size],
+ "Input hash_table_entry [entry] pointer must point in the available slots.");
state->entry_count--;
/* Shift subsequent entries back until we find an entry that belongs at its
@@ -648,7 +648,7 @@ static size_t s_remove_entry(struct hash_table_state *state, struct hash_table_e
/* Clear the entry we shifted out of */
AWS_ZERO_STRUCT(state->slots[index]);
- AWS_RETURN_WITH_POSTCONDITION(index, hash_table_state_is_valid(state) && index <= state->size);
+ AWS_RETURN_WITH_POSTCONDITION(index, hash_table_state_is_valid(state) && index <= state->size);
}
int aws_hash_table_remove(
@@ -656,12 +656,12 @@ int aws_hash_table_remove(
const void *key,
struct aws_hash_element *p_value,
int *was_present) {
- AWS_PRECONDITION(aws_hash_table_is_valid(map));
- AWS_PRECONDITION(
- p_value == NULL || AWS_OBJECT_PTR_IS_WRITABLE(p_value), "Input pointer [p_value] must be NULL or writable.");
- AWS_PRECONDITION(
- was_present == NULL || AWS_OBJECT_PTR_IS_WRITABLE(was_present),
- "Input pointer [was_present] must be NULL or writable.");
+ AWS_PRECONDITION(aws_hash_table_is_valid(map));
+ AWS_PRECONDITION(
+ p_value == NULL || AWS_OBJECT_PTR_IS_WRITABLE(p_value), "Input pointer [p_value] must be NULL or writable.");
+ AWS_PRECONDITION(
+ was_present == NULL || AWS_OBJECT_PTR_IS_WRITABLE(was_present),
+ "Input pointer [was_present] must be NULL or writable.");
struct hash_table_state *state = map->p_impl;
uint64_t hash_code = s_hash_for(state, key);
@@ -676,7 +676,7 @@ int aws_hash_table_remove(
if (rv != AWS_ERROR_SUCCESS) {
*was_present = 0;
- AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
+ AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
}
*was_present = 1;
@@ -693,21 +693,21 @@ int aws_hash_table_remove(
}
s_remove_entry(state, entry);
- AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
+ AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
+}
+
+int aws_hash_table_remove_element(struct aws_hash_table *map, struct aws_hash_element *p_value) {
+ AWS_PRECONDITION(aws_hash_table_is_valid(map));
+ AWS_PRECONDITION(p_value != NULL);
+
+ struct hash_table_state *state = map->p_impl;
+ struct hash_table_entry *entry = AWS_CONTAINER_OF(p_value, struct hash_table_entry, element);
+
+ s_remove_entry(state, entry);
+
+ AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
}
-int aws_hash_table_remove_element(struct aws_hash_table *map, struct aws_hash_element *p_value) {
- AWS_PRECONDITION(aws_hash_table_is_valid(map));
- AWS_PRECONDITION(p_value != NULL);
-
- struct hash_table_state *state = map->p_impl;
- struct hash_table_entry *entry = AWS_CONTAINER_OF(p_value, struct hash_table_entry, element);
-
- s_remove_entry(state, entry);
-
- AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
-}
-
int aws_hash_table_foreach(
struct aws_hash_table *map,
int (*callback)(void *context, struct aws_hash_element *pElement),
@@ -732,12 +732,12 @@ bool aws_hash_table_eq(
const struct aws_hash_table *a,
const struct aws_hash_table *b,
aws_hash_callback_eq_fn *value_eq) {
- AWS_PRECONDITION(aws_hash_table_is_valid(a));
- AWS_PRECONDITION(aws_hash_table_is_valid(b));
- AWS_PRECONDITION(value_eq != NULL);
-
+ AWS_PRECONDITION(aws_hash_table_is_valid(a));
+ AWS_PRECONDITION(aws_hash_table_is_valid(b));
+ AWS_PRECONDITION(value_eq != NULL);
+
if (aws_hash_table_get_entry_count(a) != aws_hash_table_get_entry_count(b)) {
- AWS_RETURN_WITH_POSTCONDITION(false, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b));
+ AWS_RETURN_WITH_POSTCONDITION(false, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b));
}
/*
@@ -745,26 +745,26 @@ bool aws_hash_table_eq(
* entries, we can simply iterate one and compare against the same key in
* the other.
*/
- for (size_t i = 0; i < a->p_impl->size; ++i) {
- const struct hash_table_entry *const a_entry = &a->p_impl->slots[i];
- if (a_entry->hash_code == 0) {
- continue;
- }
-
+ for (size_t i = 0; i < a->p_impl->size; ++i) {
+ const struct hash_table_entry *const a_entry = &a->p_impl->slots[i];
+ if (a_entry->hash_code == 0) {
+ continue;
+ }
+
struct aws_hash_element *b_element = NULL;
- aws_hash_table_find(b, a_entry->element.key, &b_element);
+ aws_hash_table_find(b, a_entry->element.key, &b_element);
if (!b_element) {
/* Key is present in A only */
- AWS_RETURN_WITH_POSTCONDITION(false, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b));
+ AWS_RETURN_WITH_POSTCONDITION(false, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b));
}
- if (!s_safe_eq_check(value_eq, a_entry->element.value, b_element->value)) {
- AWS_RETURN_WITH_POSTCONDITION(false, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b));
+ if (!s_safe_eq_check(value_eq, a_entry->element.value, b_element->value)) {
+ AWS_RETURN_WITH_POSTCONDITION(false, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b));
}
}
- AWS_RETURN_WITH_POSTCONDITION(true, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b));
+ AWS_RETURN_WITH_POSTCONDITION(true, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b));
}
/**
@@ -778,7 +778,7 @@ bool aws_hash_table_eq(
*/
static inline void s_get_next_element(struct aws_hash_iter *iter, size_t start_slot) {
AWS_PRECONDITION(iter != NULL);
- AWS_PRECONDITION(aws_hash_table_is_valid(iter->map));
+ AWS_PRECONDITION(aws_hash_table_is_valid(iter->map));
struct hash_table_state *state = iter->map->p_impl;
size_t limit = iter->limit;
@@ -803,22 +803,22 @@ struct aws_hash_iter aws_hash_iter_begin(const struct aws_hash_table *map) {
AWS_PRECONDITION(aws_hash_table_is_valid(map));
struct hash_table_state *state = map->p_impl;
struct aws_hash_iter iter;
- AWS_ZERO_STRUCT(iter);
+ AWS_ZERO_STRUCT(iter);
iter.map = map;
iter.limit = state->size;
s_get_next_element(&iter, 0);
- AWS_RETURN_WITH_POSTCONDITION(
- iter,
- aws_hash_iter_is_valid(&iter) &&
- (iter.status == AWS_HASH_ITER_STATUS_DONE || iter.status == AWS_HASH_ITER_STATUS_READY_FOR_USE),
- "The status of output aws_hash_iter [iter] must either be DONE or READY_FOR_USE.");
+ AWS_RETURN_WITH_POSTCONDITION(
+ iter,
+ aws_hash_iter_is_valid(&iter) &&
+ (iter.status == AWS_HASH_ITER_STATUS_DONE || iter.status == AWS_HASH_ITER_STATUS_READY_FOR_USE),
+ "The status of output aws_hash_iter [iter] must either be DONE or READY_FOR_USE.");
}
bool aws_hash_iter_done(const struct aws_hash_iter *iter) {
AWS_PRECONDITION(aws_hash_iter_is_valid(iter));
- AWS_PRECONDITION(
- iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE,
- "Input aws_hash_iter [iter] must either be done, or ready to use.");
+ AWS_PRECONDITION(
+ iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE,
+ "Input aws_hash_iter [iter] must either be done, or ready to use.");
/*
* SIZE_MAX is a valid (non-terminal) value for iter->slot in the event that
* we delete slot 0. See comments in aws_hash_iter_delete.
@@ -826,39 +826,39 @@ bool aws_hash_iter_done(const struct aws_hash_iter *iter) {
* As such we must use == rather than >= here.
*/
bool rval = (iter->slot == iter->limit);
- AWS_POSTCONDITION(
- iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE,
- "The status of output aws_hash_iter [iter] must either be DONE or READY_FOR_USE.");
- AWS_POSTCONDITION(
- rval == (iter->status == AWS_HASH_ITER_STATUS_DONE),
- "Output bool [rval] must be true if and only if the status of [iter] is DONE.");
+ AWS_POSTCONDITION(
+ iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE,
+ "The status of output aws_hash_iter [iter] must either be DONE or READY_FOR_USE.");
+ AWS_POSTCONDITION(
+ rval == (iter->status == AWS_HASH_ITER_STATUS_DONE),
+ "Output bool [rval] must be true if and only if the status of [iter] is DONE.");
AWS_POSTCONDITION(aws_hash_iter_is_valid(iter));
return rval;
}
void aws_hash_iter_next(struct aws_hash_iter *iter) {
AWS_PRECONDITION(aws_hash_iter_is_valid(iter));
-#ifdef CBMC
-# pragma CPROVER check push
-# pragma CPROVER check disable "unsigned-overflow"
-#endif
+#ifdef CBMC
+# pragma CPROVER check push
+# pragma CPROVER check disable "unsigned-overflow"
+#endif
s_get_next_element(iter, iter->slot + 1);
-#ifdef CBMC
-# pragma CPROVER check pop
-#endif
- AWS_POSTCONDITION(
- iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE,
- "The status of output aws_hash_iter [iter] must either be DONE or READY_FOR_USE.");
+#ifdef CBMC
+# pragma CPROVER check pop
+#endif
+ AWS_POSTCONDITION(
+ iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE,
+ "The status of output aws_hash_iter [iter] must either be DONE or READY_FOR_USE.");
AWS_POSTCONDITION(aws_hash_iter_is_valid(iter));
}
void aws_hash_iter_delete(struct aws_hash_iter *iter, bool destroy_contents) {
- AWS_PRECONDITION(
- iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE, "Input aws_hash_iter [iter] must be ready for use.");
+ AWS_PRECONDITION(
+ iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE, "Input aws_hash_iter [iter] must be ready for use.");
AWS_PRECONDITION(aws_hash_iter_is_valid(iter));
- AWS_PRECONDITION(
- iter->map->p_impl->entry_count > 0,
- "The hash_table_state pointed by input [iter] must contain at least one entry.");
+ AWS_PRECONDITION(
+ iter->map->p_impl->entry_count > 0,
+ "The hash_table_state pointed by input [iter] must contain at least one entry.");
struct hash_table_state *state = iter->map->p_impl;
if (destroy_contents) {
@@ -897,36 +897,36 @@ void aws_hash_iter_delete(struct aws_hash_iter *iter, bool destroy_contents) {
* underflowing to SIZE_MAX; we have to take care in aws_hash_iter_done to avoid
* treating this as an end-of-iteration condition.
*/
-#ifdef CBMC
-# pragma CPROVER check push
-# pragma CPROVER check disable "unsigned-overflow"
-#endif
+#ifdef CBMC
+# pragma CPROVER check push
+# pragma CPROVER check disable "unsigned-overflow"
+#endif
iter->slot--;
-#ifdef CBMC
-# pragma CPROVER check pop
-#endif
+#ifdef CBMC
+# pragma CPROVER check pop
+#endif
iter->status = AWS_HASH_ITER_STATUS_DELETE_CALLED;
- AWS_POSTCONDITION(
- iter->status == AWS_HASH_ITER_STATUS_DELETE_CALLED,
- "The status of output aws_hash_iter [iter] must be DELETE_CALLED.");
+ AWS_POSTCONDITION(
+ iter->status == AWS_HASH_ITER_STATUS_DELETE_CALLED,
+ "The status of output aws_hash_iter [iter] must be DELETE_CALLED.");
AWS_POSTCONDITION(aws_hash_iter_is_valid(iter));
}
void aws_hash_table_clear(struct aws_hash_table *map) {
- AWS_PRECONDITION(aws_hash_table_is_valid(map));
+ AWS_PRECONDITION(aws_hash_table_is_valid(map));
struct hash_table_state *state = map->p_impl;
-
- /* Check that we have at least one destructor before iterating over the table */
- if (state->destroy_key_fn || state->destroy_value_fn) {
- for (size_t i = 0; i < state->size; ++i) {
- struct hash_table_entry *entry = &state->slots[i];
- if (!entry->hash_code) {
- continue;
+
+ /* Check that we have at least one destructor before iterating over the table */
+ if (state->destroy_key_fn || state->destroy_value_fn) {
+ for (size_t i = 0; i < state->size; ++i) {
+ struct hash_table_entry *entry = &state->slots[i];
+ if (!entry->hash_code) {
+ continue;
}
- if (state->destroy_key_fn) {
- state->destroy_key_fn((void *)entry->element.key);
+ if (state->destroy_key_fn) {
+ state->destroy_key_fn((void *)entry->element.key);
}
- if (state->destroy_value_fn) {
+ if (state->destroy_value_fn) {
state->destroy_value_fn(entry->element.value);
}
}
@@ -936,11 +936,11 @@ void aws_hash_table_clear(struct aws_hash_table *map) {
memset(state->slots, 0, sizeof(*state->slots) * state->size);
state->entry_count = 0;
- AWS_POSTCONDITION(aws_hash_table_is_valid(map));
+ AWS_POSTCONDITION(aws_hash_table_is_valid(map));
}
uint64_t aws_hash_c_string(const void *item) {
- AWS_PRECONDITION(aws_c_string_is_valid(item));
+ AWS_PRECONDITION(aws_c_string_is_valid(item));
const char *str = item;
/* first digits of pi in hex */
@@ -951,27 +951,27 @@ uint64_t aws_hash_c_string(const void *item) {
}
uint64_t aws_hash_string(const void *item) {
- AWS_PRECONDITION(aws_string_is_valid(item));
+ AWS_PRECONDITION(aws_string_is_valid(item));
const struct aws_string *str = item;
/* first digits of pi in hex */
uint32_t b = 0x3243F6A8, c = 0x885A308D;
hashlittle2(aws_string_bytes(str), str->len, &c, &b);
- AWS_RETURN_WITH_POSTCONDITION(((uint64_t)b << 32) | c, aws_string_is_valid(str));
+ AWS_RETURN_WITH_POSTCONDITION(((uint64_t)b << 32) | c, aws_string_is_valid(str));
}
uint64_t aws_hash_byte_cursor_ptr(const void *item) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(item));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(item));
const struct aws_byte_cursor *cur = item;
/* first digits of pi in hex */
uint32_t b = 0x3243F6A8, c = 0x885A308D;
hashlittle2(cur->ptr, cur->len, &c, &b);
- AWS_RETURN_WITH_POSTCONDITION(((uint64_t)b << 32) | c, aws_byte_cursor_is_valid(cur));
+ AWS_RETURN_WITH_POSTCONDITION(((uint64_t)b << 32) | c, aws_byte_cursor_is_valid(cur));
}
uint64_t aws_hash_ptr(const void *item) {
- /* Since the numeric value of the pointer is considered, not the memory behind it, 0 is an acceptable value */
+ /* Since the numeric value of the pointer is considered, not the memory behind it, 0 is an acceptable value */
/* first digits of e in hex
* 2.b7e 1516 28ae d2a6 */
uint32_t b = 0x2b7e1516, c = 0x28aed2a6;
@@ -981,26 +981,26 @@ uint64_t aws_hash_ptr(const void *item) {
return ((uint64_t)b << 32) | c;
}
-uint64_t aws_hash_combine(uint64_t item1, uint64_t item2) {
- uint32_t b = item2 & 0xFFFFFFFF; /* LSB */
- uint32_t c = item2 >> 32; /* MSB */
-
- hashlittle2(&item1, sizeof(item1), &c, &b);
- return ((uint64_t)b << 32) | c;
-}
-
+uint64_t aws_hash_combine(uint64_t item1, uint64_t item2) {
+ uint32_t b = item2 & 0xFFFFFFFF; /* LSB */
+ uint32_t c = item2 >> 32; /* MSB */
+
+ hashlittle2(&item1, sizeof(item1), &c, &b);
+ return ((uint64_t)b << 32) | c;
+}
+
bool aws_hash_callback_c_str_eq(const void *a, const void *b) {
AWS_PRECONDITION(aws_c_string_is_valid(a));
AWS_PRECONDITION(aws_c_string_is_valid(b));
bool rval = !strcmp(a, b);
- AWS_RETURN_WITH_POSTCONDITION(rval, aws_c_string_is_valid(a) && aws_c_string_is_valid(b));
+ AWS_RETURN_WITH_POSTCONDITION(rval, aws_c_string_is_valid(a) && aws_c_string_is_valid(b));
}
bool aws_hash_callback_string_eq(const void *a, const void *b) {
AWS_PRECONDITION(aws_string_is_valid(a));
AWS_PRECONDITION(aws_string_is_valid(b));
bool rval = aws_string_eq(a, b);
- AWS_RETURN_WITH_POSTCONDITION(rval, aws_c_string_is_valid(a) && aws_c_string_is_valid(b));
+ AWS_RETURN_WITH_POSTCONDITION(rval, aws_c_string_is_valid(a) && aws_c_string_is_valid(b));
}
void aws_hash_callback_string_destroy(void *a) {
@@ -1011,90 +1011,90 @@ void aws_hash_callback_string_destroy(void *a) {
bool aws_ptr_eq(const void *a, const void *b) {
return a == b;
}
-
-/**
- * Best-effort check of hash_table_state data-structure invariants
- * Some invariants, such as that the number of entries is actually the
- * same as the entry_count field, would require a loop to check
- */
-bool aws_hash_table_is_valid(const struct aws_hash_table *map) {
- return map && map->p_impl && hash_table_state_is_valid(map->p_impl);
-}
-
-/**
- * Best-effort check of hash_table_state data-structure invariants
- * Some invariants, such as that the number of entries is actually the
- * same as the entry_count field, would require a loop to check
- */
-bool hash_table_state_is_valid(const struct hash_table_state *map) {
- if (!map) {
- return false;
- }
- bool hash_fn_nonnull = (map->hash_fn != NULL);
- bool equals_fn_nonnull = (map->equals_fn != NULL);
- /*destroy_key_fn and destroy_value_fn are both allowed to be NULL*/
- bool alloc_nonnull = (map->alloc != NULL);
- bool size_at_least_two = (map->size >= 2);
- bool size_is_power_of_two = aws_is_power_of_two(map->size);
- bool entry_count = (map->entry_count <= map->max_load);
- bool max_load = (map->max_load < map->size);
- bool mask_is_correct = (map->mask == (map->size - 1));
- bool max_load_factor_bounded = map->max_load_factor == 0.95; //(map->max_load_factor < 1.0);
- bool slots_allocated = AWS_MEM_IS_WRITABLE(&map->slots[0], sizeof(map->slots[0]) * map->size);
-
- return hash_fn_nonnull && equals_fn_nonnull && alloc_nonnull && size_at_least_two && size_is_power_of_two &&
- entry_count && max_load && mask_is_correct && max_load_factor_bounded && slots_allocated;
-}
-
-/**
- * Given a pointer to a hash_iter, checks that it is well-formed, with all data-structure invariants.
- */
-bool aws_hash_iter_is_valid(const struct aws_hash_iter *iter) {
- if (!iter) {
- return false;
- }
- if (!iter->map) {
- return false;
- }
- if (!aws_hash_table_is_valid(iter->map)) {
- return false;
- }
- if (iter->limit > iter->map->p_impl->size) {
- return false;
- }
-
- switch (iter->status) {
- case AWS_HASH_ITER_STATUS_DONE:
- /* Done iff slot == limit */
- return iter->slot == iter->limit;
- case AWS_HASH_ITER_STATUS_DELETE_CALLED:
- /* iter->slot can underflow to SIZE_MAX after a delete
- * see the comments for aws_hash_iter_delete() */
- return iter->slot <= iter->limit || iter->slot == SIZE_MAX;
- case AWS_HASH_ITER_STATUS_READY_FOR_USE:
- /* A slot must point to a valid location (i.e. hash_code != 0) */
- return iter->slot < iter->limit && iter->map->p_impl->slots[iter->slot].hash_code != 0;
- }
- /* Invalid status code */
- return false;
-}
-
-/**
- * Determine the total number of bytes needed for a hash-table with
- * "size" slots. If the result would overflow a size_t, return
- * AWS_OP_ERR; otherwise, return AWS_OP_SUCCESS with the result in
- * "required_bytes".
- */
-int hash_table_state_required_bytes(size_t size, size_t *required_bytes) {
-
- size_t elemsize;
- if (aws_mul_size_checked(size, sizeof(struct hash_table_entry), &elemsize)) {
- return AWS_OP_ERR;
- }
-
- if (aws_add_size_checked(elemsize, sizeof(struct hash_table_state), required_bytes)) {
- return AWS_OP_ERR;
- }
-
- return AWS_OP_SUCCESS;
-}
+
+/**
+ * Best-effort check of hash_table_state data-structure invariants
+ * Some invariants, such as that the number of entries is actually the
+ * same as the entry_count field, would require a loop to check
+ */
+bool aws_hash_table_is_valid(const struct aws_hash_table *map) {
+ return map && map->p_impl && hash_table_state_is_valid(map->p_impl);
+}
+
+/**
+ * Best-effort check of hash_table_state data-structure invariants
+ * Some invariants, such as that the number of entries is actually the
+ * same as the entry_count field, would require a loop to check
+ */
+bool hash_table_state_is_valid(const struct hash_table_state *map) {
+ if (!map) {
+ return false;
+ }
+ bool hash_fn_nonnull = (map->hash_fn != NULL);
+ bool equals_fn_nonnull = (map->equals_fn != NULL);
+ /*destroy_key_fn and destroy_value_fn are both allowed to be NULL*/
+ bool alloc_nonnull = (map->alloc != NULL);
+ bool size_at_least_two = (map->size >= 2);
+ bool size_is_power_of_two = aws_is_power_of_two(map->size);
+ bool entry_count = (map->entry_count <= map->max_load);
+ bool max_load = (map->max_load < map->size);
+ bool mask_is_correct = (map->mask == (map->size - 1));
+ bool max_load_factor_bounded = map->max_load_factor == 0.95; //(map->max_load_factor < 1.0);
+ bool slots_allocated = AWS_MEM_IS_WRITABLE(&map->slots[0], sizeof(map->slots[0]) * map->size);
+
+ return hash_fn_nonnull && equals_fn_nonnull && alloc_nonnull && size_at_least_two && size_is_power_of_two &&
+ entry_count && max_load && mask_is_correct && max_load_factor_bounded && slots_allocated;
+}
+
+/**
+ * Given a pointer to a hash_iter, checks that it is well-formed, with all data-structure invariants.
+ */
+bool aws_hash_iter_is_valid(const struct aws_hash_iter *iter) {
+ if (!iter) {
+ return false;
+ }
+ if (!iter->map) {
+ return false;
+ }
+ if (!aws_hash_table_is_valid(iter->map)) {
+ return false;
+ }
+ if (iter->limit > iter->map->p_impl->size) {
+ return false;
+ }
+
+ switch (iter->status) {
+ case AWS_HASH_ITER_STATUS_DONE:
+ /* Done iff slot == limit */
+ return iter->slot == iter->limit;
+ case AWS_HASH_ITER_STATUS_DELETE_CALLED:
+ /* iter->slot can underflow to SIZE_MAX after a delete
+ * see the comments for aws_hash_iter_delete() */
+ return iter->slot <= iter->limit || iter->slot == SIZE_MAX;
+ case AWS_HASH_ITER_STATUS_READY_FOR_USE:
+ /* A slot must point to a valid location (i.e. hash_code != 0) */
+ return iter->slot < iter->limit && iter->map->p_impl->slots[iter->slot].hash_code != 0;
+ }
+ /* Invalid status code */
+ return false;
+}
+
+/**
+ * Determine the total number of bytes needed for a hash-table with
+ * "size" slots. If the result would overflow a size_t, return
+ * AWS_OP_ERR; otherwise, return AWS_OP_SUCCESS with the result in
+ * "required_bytes".
+ */
+int hash_table_state_required_bytes(size_t size, size_t *required_bytes) {
+
+ size_t elemsize;
+ if (aws_mul_size_checked(size, sizeof(struct hash_table_entry), &elemsize)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_add_size_checked(elemsize, sizeof(struct hash_table_state), required_bytes)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
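
For orientation, the public API exercised throughout this file can be driven roughly as follows. This is an illustrative sketch, not part of the patch; it assumes the usual aws-c-common entry points (aws_default_allocator, aws_hash_table_clean_up), uses C-string keys with the helpers defined above, and abbreviates error handling.

/* Hypothetical usage sketch; not part of the patch. */
#include <aws/common/hash_table.h>
#include <stdio.h>

int main(void) {
    struct aws_hash_table table;
    if (aws_hash_table_init(
            &table, aws_default_allocator(), 8, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL)) {
        return 1;
    }

    int was_created = 0;
    aws_hash_table_put(&table, "color", (void *)"green", &was_created);

    struct aws_hash_element *elem = NULL;
    aws_hash_table_find(&table, "color", &elem);
    if (elem) {
        printf("color=%s (new entry: %d)\n", (const char *)elem->value, was_created);
    }

    /* Iteration visits occupied slots only; termination is reported by aws_hash_iter_done(). */
    for (struct aws_hash_iter it = aws_hash_iter_begin(&table); !aws_hash_iter_done(&it); aws_hash_iter_next(&it)) {
        printf("%s -> %s\n", (const char *)it.element.key, (const char *)it.element.value);
    }

    aws_hash_table_clean_up(&table);
    return 0;
}
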
diff --git a/contrib/restricted/aws/aws-c-common/source/lifo_cache.c b/contrib/restricted/aws/aws-c-common/source/lifo_cache.c
index a3c6ee7b0b..0612c66aa6 100644
--- a/contrib/restricted/aws/aws-c-common/source/lifo_cache.c
+++ b/contrib/restricted/aws/aws-c-common/source/lifo_cache.c
@@ -1,62 +1,62 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-#include <aws/common/lifo_cache.h>
-static int s_lifo_cache_put(struct aws_cache *cache, const void *key, void *p_value);
-
-static struct aws_cache_vtable s_lifo_cache_vtable = {
- .destroy = aws_cache_base_default_destroy,
- .find = aws_cache_base_default_find,
- .put = s_lifo_cache_put,
- .remove = aws_cache_base_default_remove,
- .clear = aws_cache_base_default_clear,
- .get_element_count = aws_cache_base_default_get_element_count,
-};
-
-struct aws_cache *aws_cache_new_lifo(
- struct aws_allocator *allocator,
- aws_hash_fn *hash_fn,
- aws_hash_callback_eq_fn *equals_fn,
- aws_hash_callback_destroy_fn *destroy_key_fn,
- aws_hash_callback_destroy_fn *destroy_value_fn,
- size_t max_items) {
- AWS_ASSERT(allocator);
- AWS_ASSERT(max_items);
-
- struct aws_cache *lifo_cache = aws_mem_calloc(allocator, 1, sizeof(struct aws_cache));
- if (!lifo_cache) {
- return NULL;
- }
- lifo_cache->allocator = allocator;
- lifo_cache->max_items = max_items;
- lifo_cache->vtable = &s_lifo_cache_vtable;
- if (aws_linked_hash_table_init(
- &lifo_cache->table, allocator, hash_fn, equals_fn, destroy_key_fn, destroy_value_fn, max_items)) {
- return NULL;
- }
- return lifo_cache;
-}
-
-/* lifo cache put implementation */
-static int s_lifo_cache_put(struct aws_cache *cache, const void *key, void *p_value) {
- if (aws_linked_hash_table_put(&cache->table, key, p_value)) {
- return AWS_OP_ERR;
- }
-
- /* Manage the space if we actually added a new element and the cache is full. */
- if (aws_linked_hash_table_get_element_count(&cache->table) > cache->max_items) {
- /* we're over the cache size limit. Remove whatever is in the one before the back of the linked_hash_table,
- * which was the latest element before we put the new one */
- const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table);
- struct aws_linked_list_node *node = aws_linked_list_back(list);
- if (!node->prev) {
- return AWS_OP_SUCCESS;
- }
- struct aws_linked_hash_table_node *table_node =
- AWS_CONTAINER_OF(node->prev, struct aws_linked_hash_table_node, node);
- return aws_linked_hash_table_remove(&cache->table, table_node->key);
- }
-
- return AWS_OP_SUCCESS;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/lifo_cache.h>
+static int s_lifo_cache_put(struct aws_cache *cache, const void *key, void *p_value);
+
+static struct aws_cache_vtable s_lifo_cache_vtable = {
+ .destroy = aws_cache_base_default_destroy,
+ .find = aws_cache_base_default_find,
+ .put = s_lifo_cache_put,
+ .remove = aws_cache_base_default_remove,
+ .clear = aws_cache_base_default_clear,
+ .get_element_count = aws_cache_base_default_get_element_count,
+};
+
+struct aws_cache *aws_cache_new_lifo(
+ struct aws_allocator *allocator,
+ aws_hash_fn *hash_fn,
+ aws_hash_callback_eq_fn *equals_fn,
+ aws_hash_callback_destroy_fn *destroy_key_fn,
+ aws_hash_callback_destroy_fn *destroy_value_fn,
+ size_t max_items) {
+ AWS_ASSERT(allocator);
+ AWS_ASSERT(max_items);
+
+ struct aws_cache *lifo_cache = aws_mem_calloc(allocator, 1, sizeof(struct aws_cache));
+ if (!lifo_cache) {
+ return NULL;
+ }
+ lifo_cache->allocator = allocator;
+ lifo_cache->max_items = max_items;
+ lifo_cache->vtable = &s_lifo_cache_vtable;
+ if (aws_linked_hash_table_init(
+ &lifo_cache->table, allocator, hash_fn, equals_fn, destroy_key_fn, destroy_value_fn, max_items)) {
+ return NULL;
+ }
+ return lifo_cache;
+}
+
+/* lifo cache put implementation */
+static int s_lifo_cache_put(struct aws_cache *cache, const void *key, void *p_value) {
+ if (aws_linked_hash_table_put(&cache->table, key, p_value)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Manage the space if we actually added a new element and the cache is full. */
+ if (aws_linked_hash_table_get_element_count(&cache->table) > cache->max_items) {
+ /* we're over the cache size limit. Remove whatever is in the one before the back of the linked_hash_table,
+ * which was the latest element before we put the new one */
+ const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table);
+ struct aws_linked_list_node *node = aws_linked_list_back(list);
+ if (!node->prev) {
+ return AWS_OP_SUCCESS;
+ }
+ struct aws_linked_hash_table_node *table_node =
+ AWS_CONTAINER_OF(node->prev, struct aws_linked_hash_table_node, node);
+ return aws_linked_hash_table_remove(&cache->table, table_node->key);
+ }
+
+ return AWS_OP_SUCCESS;
+}
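
From the caller's side, the eviction rule above plays out roughly like this. An illustrative sketch, assuming the generic aws_cache_put/aws_cache_find/aws_cache_destroy front-ends from cache.c and the C-string hash helpers from hash_table.c:

/* Hypothetical usage sketch; not part of the patch. */
#include <aws/common/cache.h>
#include <aws/common/hash_table.h>
#include <aws/common/lifo_cache.h>

void lifo_demo(struct aws_allocator *allocator) {
    struct aws_cache *cache = aws_cache_new_lifo(
        allocator, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL, 2 /* max_items */);
    if (!cache) {
        return;
    }

    aws_cache_put(cache, "a", (void *)"1");
    aws_cache_put(cache, "b", (void *)"2");

    /* The cache is full; this put evicts the most recently added of the old
     * entries ("b"), which is exactly the node-before-the-back removal above. */
    aws_cache_put(cache, "c", (void *)"3");

    void *value = NULL;
    aws_cache_find(cache, "b", &value); /* expected: value stays NULL after eviction */

    aws_cache_destroy(cache);
}
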
diff --git a/contrib/restricted/aws/aws-c-common/source/linked_hash_table.c b/contrib/restricted/aws/aws-c-common/source/linked_hash_table.c
index 1dfb2f6927..42c6a1b530 100644
--- a/contrib/restricted/aws/aws-c-common/source/linked_hash_table.c
+++ b/contrib/restricted/aws/aws-c-common/source/linked_hash_table.c
@@ -1,137 +1,137 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-#include <aws/common/linked_hash_table.h>
-
-static void s_element_destroy(void *value) {
- struct aws_linked_hash_table_node *node = value;
-
- if (node->table->user_on_value_destroy) {
- node->table->user_on_value_destroy(node->value);
- }
-
- aws_linked_list_remove(&node->node);
- aws_mem_release(node->table->allocator, node);
-}
-
-int aws_linked_hash_table_init(
- struct aws_linked_hash_table *table,
- struct aws_allocator *allocator,
- aws_hash_fn *hash_fn,
- aws_hash_callback_eq_fn *equals_fn,
- aws_hash_callback_destroy_fn *destroy_key_fn,
- aws_hash_callback_destroy_fn *destroy_value_fn,
- size_t initial_item_count) {
- AWS_ASSERT(table);
- AWS_ASSERT(allocator);
- AWS_ASSERT(hash_fn);
- AWS_ASSERT(equals_fn);
-
- table->allocator = allocator;
- table->user_on_value_destroy = destroy_value_fn;
-
- aws_linked_list_init(&table->list);
- return aws_hash_table_init(
- &table->table, allocator, initial_item_count, hash_fn, equals_fn, destroy_key_fn, s_element_destroy);
-}
-
-void aws_linked_hash_table_clean_up(struct aws_linked_hash_table *table) {
- /* clearing the table will remove all elements. That will also deallocate
- * any table entries we currently have. */
- aws_hash_table_clean_up(&table->table);
- AWS_ZERO_STRUCT(*table);
-}
-
-int aws_linked_hash_table_find(struct aws_linked_hash_table *table, const void *key, void **p_value) {
-
- struct aws_hash_element *element = NULL;
- int err_val = aws_hash_table_find(&table->table, key, &element);
-
- if (err_val || !element) {
- *p_value = NULL;
- return err_val;
- }
-
- struct aws_linked_hash_table_node *linked_node = element->value;
- *p_value = linked_node->value;
-
- return AWS_OP_SUCCESS;
-}
-
-int aws_linked_hash_table_find_and_move_to_back(struct aws_linked_hash_table *table, const void *key, void **p_value) {
-
- struct aws_hash_element *element = NULL;
- int err_val = aws_hash_table_find(&table->table, key, &element);
-
- if (err_val || !element) {
- *p_value = NULL;
- return err_val;
- }
-
- struct aws_linked_hash_table_node *linked_node = element->value;
- *p_value = linked_node->value;
- /* on access, remove from current place in list and move it to the back. */
- aws_linked_hash_table_move_node_to_end_of_list(table, linked_node);
- return AWS_OP_SUCCESS;
-}
-
-int aws_linked_hash_table_put(struct aws_linked_hash_table *table, const void *key, void *p_value) {
-
- struct aws_linked_hash_table_node *node =
- aws_mem_calloc(table->allocator, 1, sizeof(struct aws_linked_hash_table_node));
-
- if (!node) {
- return AWS_OP_ERR;
- }
-
- struct aws_hash_element *element = NULL;
- int was_added = 0;
- int err_val = aws_hash_table_create(&table->table, key, &element, &was_added);
-
- if (err_val) {
- aws_mem_release(table->allocator, node);
- return err_val;
- }
-
- if (element->value) {
- s_element_destroy(element->value);
- }
-
- node->value = p_value;
- node->key = key;
- node->table = table;
- element->value = node;
-
- aws_linked_list_push_back(&table->list, &node->node);
-
- return AWS_OP_SUCCESS;
-}
-
-int aws_linked_hash_table_remove(struct aws_linked_hash_table *table, const void *key) {
- /* allocated table memory and the linked list entry will be removed in the
- * callback. */
- return aws_hash_table_remove(&table->table, key, NULL, NULL);
-}
-
-void aws_linked_hash_table_clear(struct aws_linked_hash_table *table) {
- /* clearing the table will remove all elements. That will also deallocate
- * any entries we currently have. */
- aws_hash_table_clear(&table->table);
-}
-
-size_t aws_linked_hash_table_get_element_count(const struct aws_linked_hash_table *table) {
- return aws_hash_table_get_entry_count(&table->table);
-}
-
-void aws_linked_hash_table_move_node_to_end_of_list(
- struct aws_linked_hash_table *table,
- struct aws_linked_hash_table_node *node) {
-
- aws_linked_list_remove(&node->node);
- aws_linked_list_push_back(&table->list, &node->node);
-}
-
-const struct aws_linked_list *aws_linked_hash_table_get_iteration_list(const struct aws_linked_hash_table *table) {
- return &table->list;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/linked_hash_table.h>
+
+static void s_element_destroy(void *value) {
+ struct aws_linked_hash_table_node *node = value;
+
+ if (node->table->user_on_value_destroy) {
+ node->table->user_on_value_destroy(node->value);
+ }
+
+ aws_linked_list_remove(&node->node);
+ aws_mem_release(node->table->allocator, node);
+}
+
+int aws_linked_hash_table_init(
+ struct aws_linked_hash_table *table,
+ struct aws_allocator *allocator,
+ aws_hash_fn *hash_fn,
+ aws_hash_callback_eq_fn *equals_fn,
+ aws_hash_callback_destroy_fn *destroy_key_fn,
+ aws_hash_callback_destroy_fn *destroy_value_fn,
+ size_t initial_item_count) {
+ AWS_ASSERT(table);
+ AWS_ASSERT(allocator);
+ AWS_ASSERT(hash_fn);
+ AWS_ASSERT(equals_fn);
+
+ table->allocator = allocator;
+ table->user_on_value_destroy = destroy_value_fn;
+
+ aws_linked_list_init(&table->list);
+ return aws_hash_table_init(
+ &table->table, allocator, initial_item_count, hash_fn, equals_fn, destroy_key_fn, s_element_destroy);
+}
+
+void aws_linked_hash_table_clean_up(struct aws_linked_hash_table *table) {
+ /* clearing the table will remove all elements. That will also deallocate
+ * any table entries we currently have. */
+ aws_hash_table_clean_up(&table->table);
+ AWS_ZERO_STRUCT(*table);
+}
+
+int aws_linked_hash_table_find(struct aws_linked_hash_table *table, const void *key, void **p_value) {
+
+ struct aws_hash_element *element = NULL;
+ int err_val = aws_hash_table_find(&table->table, key, &element);
+
+ if (err_val || !element) {
+ *p_value = NULL;
+ return err_val;
+ }
+
+ struct aws_linked_hash_table_node *linked_node = element->value;
+ *p_value = linked_node->value;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_linked_hash_table_find_and_move_to_back(struct aws_linked_hash_table *table, const void *key, void **p_value) {
+
+ struct aws_hash_element *element = NULL;
+ int err_val = aws_hash_table_find(&table->table, key, &element);
+
+ if (err_val || !element) {
+ *p_value = NULL;
+ return err_val;
+ }
+
+ struct aws_linked_hash_table_node *linked_node = element->value;
+ *p_value = linked_node->value;
+ /* on access, remove from current place in list and move it to the back. */
+ aws_linked_hash_table_move_node_to_end_of_list(table, linked_node);
+ return AWS_OP_SUCCESS;
+}
+
+int aws_linked_hash_table_put(struct aws_linked_hash_table *table, const void *key, void *p_value) {
+
+ struct aws_linked_hash_table_node *node =
+ aws_mem_calloc(table->allocator, 1, sizeof(struct aws_linked_hash_table_node));
+
+ if (!node) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_hash_element *element = NULL;
+ int was_added = 0;
+ int err_val = aws_hash_table_create(&table->table, key, &element, &was_added);
+
+ if (err_val) {
+ aws_mem_release(table->allocator, node);
+ return err_val;
+ }
+
+ if (element->value) {
+ s_element_destroy(element->value);
+ }
+
+ node->value = p_value;
+ node->key = key;
+ node->table = table;
+ element->value = node;
+
+ aws_linked_list_push_back(&table->list, &node->node);
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_linked_hash_table_remove(struct aws_linked_hash_table *table, const void *key) {
+ /* allocated table memory and the linked list entry will be removed in the
+ * callback. */
+ return aws_hash_table_remove(&table->table, key, NULL, NULL);
+}
+
+void aws_linked_hash_table_clear(struct aws_linked_hash_table *table) {
+ /* clearing the table will remove all elements. That will also deallocate
+ * any entries we currently have. */
+ aws_hash_table_clear(&table->table);
+}
+
+size_t aws_linked_hash_table_get_element_count(const struct aws_linked_hash_table *table) {
+ return aws_hash_table_get_entry_count(&table->table);
+}
+
+void aws_linked_hash_table_move_node_to_end_of_list(
+ struct aws_linked_hash_table *table,
+ struct aws_linked_hash_table_node *node) {
+
+ aws_linked_list_remove(&node->node);
+ aws_linked_list_push_back(&table->list, &node->node);
+}
+
+const struct aws_linked_list *aws_linked_hash_table_get_iteration_list(const struct aws_linked_hash_table *table) {
+ return &table->list;
+}
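
A rough usage sketch of the linked hash table, contrasting a plain find with find_and_move_to_back and walking the intrusive iteration list; aws_linked_list_begin/end/next are assumed to be the usual helpers from linked_list.h, and error handling is abbreviated:

/* Hypothetical usage sketch; not part of the patch. */
#include <aws/common/hash_table.h>
#include <aws/common/linked_hash_table.h>

int linked_table_demo(struct aws_allocator *allocator) {
    struct aws_linked_hash_table table;
    if (aws_linked_hash_table_init(
            &table, allocator, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL, 8)) {
        return AWS_OP_ERR;
    }

    aws_linked_hash_table_put(&table, "first", (void *)"1");
    aws_linked_hash_table_put(&table, "second", (void *)"2");

    /* A plain find leaves the iteration order alone ... */
    void *value = NULL;
    aws_linked_hash_table_find(&table, "first", &value);

    /* ... while find_and_move_to_back promotes the node, which is what the
     * cache implementations rely on for recency ordering. */
    aws_linked_hash_table_find_and_move_to_back(&table, "first", &value);

    /* Iterate in (possibly reordered) insertion order via the intrusive list. */
    const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&table);
    for (struct aws_linked_list_node *node = aws_linked_list_begin(list); node != aws_linked_list_end(list);
         node = aws_linked_list_next(node)) {
        struct aws_linked_hash_table_node *entry = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node);
        (void)entry; /* entry->key and entry->value are available here */
    }

    aws_linked_hash_table_clean_up(&table);
    return AWS_OP_SUCCESS;
}
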
diff --git a/contrib/restricted/aws/aws-c-common/source/log_channel.c b/contrib/restricted/aws/aws-c-common/source/log_channel.c
index f33718ff26..bacc8a7e02 100644
--- a/contrib/restricted/aws/aws-c-common/source/log_channel.c
+++ b/contrib/restricted/aws/aws-c-common/source/log_channel.c
@@ -1,247 +1,247 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/log_channel.h>
-
-#include <aws/common/condition_variable.h>
-#include <aws/common/log_writer.h>
-#include <aws/common/mutex.h>
-#include <aws/common/string.h>
-#include <aws/common/thread.h>
-
-#include <stdio.h>
-
-/*
- * Basic channel implementations - synchronized foreground, synchronized background
- */
-
-struct aws_log_foreground_channel {
- struct aws_mutex sync;
-};
-
-static int s_foreground_channel_send(struct aws_log_channel *channel, struct aws_string *log_line) {
-
- struct aws_log_foreground_channel *impl = (struct aws_log_foreground_channel *)channel->impl;
-
- AWS_ASSERT(channel->writer->vtable->write);
-
- aws_mutex_lock(&impl->sync);
- (channel->writer->vtable->write)(channel->writer, log_line);
- aws_mutex_unlock(&impl->sync);
-
- /*
- * send is considered a transfer of ownership. write is not a transfer of ownership.
- * So it's always the channel's responsibility to clean up all log lines that enter
- * it as soon as they are no longer needed.
- */
- aws_string_destroy(log_line);
-
- return AWS_OP_SUCCESS;
-}
-
-static void s_foreground_channel_clean_up(struct aws_log_channel *channel) {
- struct aws_log_foreground_channel *impl = (struct aws_log_foreground_channel *)channel->impl;
-
- aws_mutex_clean_up(&impl->sync);
-
- aws_mem_release(channel->allocator, impl);
-}
-
-static struct aws_log_channel_vtable s_foreground_channel_vtable = {
- .send = s_foreground_channel_send,
- .clean_up = s_foreground_channel_clean_up,
-};
-
-int aws_log_channel_init_foreground(
- struct aws_log_channel *channel,
- struct aws_allocator *allocator,
- struct aws_log_writer *writer) {
- struct aws_log_foreground_channel *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_log_foreground_channel));
- if (impl == NULL) {
- return AWS_OP_ERR;
- }
-
- if (aws_mutex_init(&impl->sync)) {
- aws_mem_release(allocator, impl);
- return AWS_OP_ERR;
- }
-
- channel->vtable = &s_foreground_channel_vtable;
- channel->allocator = allocator;
- channel->writer = writer;
- channel->impl = impl;
-
- return AWS_OP_SUCCESS;
-}
-
-struct aws_log_background_channel {
- struct aws_mutex sync;
- struct aws_thread background_thread;
- struct aws_array_list pending_log_lines;
- struct aws_condition_variable pending_line_signal;
- bool finished;
-};
-
-static int s_background_channel_send(struct aws_log_channel *channel, struct aws_string *log_line) {
-
- struct aws_log_background_channel *impl = (struct aws_log_background_channel *)channel->impl;
-
- aws_mutex_lock(&impl->sync);
- aws_array_list_push_back(&impl->pending_log_lines, &log_line);
- aws_condition_variable_notify_one(&impl->pending_line_signal);
- aws_mutex_unlock(&impl->sync);
-
- return AWS_OP_SUCCESS;
-}
-
-static void s_background_channel_clean_up(struct aws_log_channel *channel) {
- struct aws_log_background_channel *impl = (struct aws_log_background_channel *)channel->impl;
-
- aws_mutex_lock(&impl->sync);
- impl->finished = true;
- aws_condition_variable_notify_one(&impl->pending_line_signal);
- aws_mutex_unlock(&impl->sync);
-
- aws_thread_join(&impl->background_thread);
-
- aws_thread_clean_up(&impl->background_thread);
- aws_condition_variable_clean_up(&impl->pending_line_signal);
- aws_array_list_clean_up(&impl->pending_log_lines);
- aws_mutex_clean_up(&impl->sync);
- aws_mem_release(channel->allocator, impl);
-}
-
-static struct aws_log_channel_vtable s_background_channel_vtable = {
- .send = s_background_channel_send,
- .clean_up = s_background_channel_clean_up,
-};
-
-static bool s_background_wait(void *context) {
- struct aws_log_background_channel *impl = (struct aws_log_background_channel *)context;
-
- /*
- * Condition variable predicates are checked under mutex protection
- */
- return impl->finished || aws_array_list_length(&impl->pending_log_lines) > 0;
-}
-
-static void s_background_thread_writer(void *thread_data) {
- (void)thread_data;
-
- struct aws_log_channel *channel = (struct aws_log_channel *)thread_data;
- AWS_ASSERT(channel->writer->vtable->write);
-
- struct aws_log_background_channel *impl = (struct aws_log_background_channel *)channel->impl;
-
- struct aws_array_list log_lines;
-
- AWS_FATAL_ASSERT(aws_array_list_init_dynamic(&log_lines, channel->allocator, 10, sizeof(struct aws_string *)) == 0);
-
- while (true) {
- aws_mutex_lock(&impl->sync);
- aws_condition_variable_wait_pred(&impl->pending_line_signal, &impl->sync, s_background_wait, impl);
-
- size_t line_count = aws_array_list_length(&impl->pending_log_lines);
- bool finished = impl->finished;
-
- if (line_count == 0) {
- aws_mutex_unlock(&impl->sync);
- if (finished) {
- break;
- }
- continue;
- }
-
- aws_array_list_swap_contents(&impl->pending_log_lines, &log_lines);
- aws_mutex_unlock(&impl->sync);
-
- /*
- * Consider copying these into a page-sized stack buffer (string) and then making the write calls
- * against it rather than the individual strings. Might be a savings when > 1 lines (cut down on
- * write calls).
- */
- for (size_t i = 0; i < line_count; ++i) {
- struct aws_string *log_line = NULL;
- AWS_FATAL_ASSERT(aws_array_list_get_at(&log_lines, &log_line, i) == AWS_OP_SUCCESS);
-
- (channel->writer->vtable->write)(channel->writer, log_line);
-
- /*
- * send is considered a transfer of ownership. write is not a transfer of ownership.
- * So it's always the channel's responsibility to clean up all log lines that enter
- * it as soon as they are no longer needed.
- */
- aws_string_destroy(log_line);
- }
-
- aws_array_list_clear(&log_lines);
- }
-
- aws_array_list_clean_up(&log_lines);
-}
-
-int aws_log_channel_init_background(
- struct aws_log_channel *channel,
- struct aws_allocator *allocator,
- struct aws_log_writer *writer) {
- struct aws_log_background_channel *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_log_background_channel));
- if (impl == NULL) {
- return AWS_OP_ERR;
- }
-
- impl->finished = false;
-
- if (aws_mutex_init(&impl->sync)) {
- goto clean_up_sync_init_fail;
- }
-
- if (aws_array_list_init_dynamic(&impl->pending_log_lines, allocator, 10, sizeof(struct aws_string *))) {
- goto clean_up_pending_log_lines_init_fail;
- }
-
- if (aws_condition_variable_init(&impl->pending_line_signal)) {
- goto clean_up_pending_line_signal_init_fail;
- }
-
- if (aws_thread_init(&impl->background_thread, allocator)) {
- goto clean_up_background_thread_init_fail;
- }
-
- channel->vtable = &s_background_channel_vtable;
- channel->allocator = allocator;
- channel->impl = impl;
- channel->writer = writer;
-
- /*
- * Logging thread should need very little stack, but let's defer this to later
- */
- struct aws_thread_options thread_options = {.stack_size = 0};
-
- if (aws_thread_launch(&impl->background_thread, s_background_thread_writer, channel, &thread_options) ==
- AWS_OP_SUCCESS) {
- return AWS_OP_SUCCESS;
- }
-
- aws_thread_clean_up(&impl->background_thread);
-
-clean_up_background_thread_init_fail:
- aws_condition_variable_clean_up(&impl->pending_line_signal);
-
-clean_up_pending_line_signal_init_fail:
- aws_array_list_clean_up(&impl->pending_log_lines);
-
-clean_up_pending_log_lines_init_fail:
- aws_mutex_clean_up(&impl->sync);
-
-clean_up_sync_init_fail:
- aws_mem_release(allocator, impl);
-
- return AWS_OP_ERR;
-}
-
-void aws_log_channel_clean_up(struct aws_log_channel *channel) {
- AWS_ASSERT(channel->vtable->clean_up);
- (channel->vtable->clean_up)(channel);
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/log_channel.h>
+
+#include <aws/common/condition_variable.h>
+#include <aws/common/log_writer.h>
+#include <aws/common/mutex.h>
+#include <aws/common/string.h>
+#include <aws/common/thread.h>
+
+#include <stdio.h>
+
+/*
+ * Basic channel implementations - synchronized foreground, synchronized background
+ */
+
+struct aws_log_foreground_channel {
+ struct aws_mutex sync;
+};
+
+static int s_foreground_channel_send(struct aws_log_channel *channel, struct aws_string *log_line) {
+
+ struct aws_log_foreground_channel *impl = (struct aws_log_foreground_channel *)channel->impl;
+
+ AWS_ASSERT(channel->writer->vtable->write);
+
+ aws_mutex_lock(&impl->sync);
+ (channel->writer->vtable->write)(channel->writer, log_line);
+ aws_mutex_unlock(&impl->sync);
+
+ /*
+ * send is considered a transfer of ownership. write is not a transfer of ownership.
+ * So it's always the channel's responsibility to clean up all log lines that enter
+ * it as soon as they are no longer needed.
+ */
+ aws_string_destroy(log_line);
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_foreground_channel_clean_up(struct aws_log_channel *channel) {
+ struct aws_log_foreground_channel *impl = (struct aws_log_foreground_channel *)channel->impl;
+
+ aws_mutex_clean_up(&impl->sync);
+
+ aws_mem_release(channel->allocator, impl);
+}
+
+static struct aws_log_channel_vtable s_foreground_channel_vtable = {
+ .send = s_foreground_channel_send,
+ .clean_up = s_foreground_channel_clean_up,
+};
+
+int aws_log_channel_init_foreground(
+ struct aws_log_channel *channel,
+ struct aws_allocator *allocator,
+ struct aws_log_writer *writer) {
+ struct aws_log_foreground_channel *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_log_foreground_channel));
+ if (impl == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_mutex_init(&impl->sync)) {
+ aws_mem_release(allocator, impl);
+ return AWS_OP_ERR;
+ }
+
+ channel->vtable = &s_foreground_channel_vtable;
+ channel->allocator = allocator;
+ channel->writer = writer;
+ channel->impl = impl;
+
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_log_background_channel {
+ struct aws_mutex sync;
+ struct aws_thread background_thread;
+ struct aws_array_list pending_log_lines;
+ struct aws_condition_variable pending_line_signal;
+ bool finished;
+};
+
+static int s_background_channel_send(struct aws_log_channel *channel, struct aws_string *log_line) {
+
+ struct aws_log_background_channel *impl = (struct aws_log_background_channel *)channel->impl;
+
+ aws_mutex_lock(&impl->sync);
+ aws_array_list_push_back(&impl->pending_log_lines, &log_line);
+ aws_condition_variable_notify_one(&impl->pending_line_signal);
+ aws_mutex_unlock(&impl->sync);
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_background_channel_clean_up(struct aws_log_channel *channel) {
+ struct aws_log_background_channel *impl = (struct aws_log_background_channel *)channel->impl;
+
+ aws_mutex_lock(&impl->sync);
+ impl->finished = true;
+ aws_condition_variable_notify_one(&impl->pending_line_signal);
+ aws_mutex_unlock(&impl->sync);
+
+ aws_thread_join(&impl->background_thread);
+
+ aws_thread_clean_up(&impl->background_thread);
+ aws_condition_variable_clean_up(&impl->pending_line_signal);
+ aws_array_list_clean_up(&impl->pending_log_lines);
+ aws_mutex_clean_up(&impl->sync);
+ aws_mem_release(channel->allocator, impl);
+}
+
+static struct aws_log_channel_vtable s_background_channel_vtable = {
+ .send = s_background_channel_send,
+ .clean_up = s_background_channel_clean_up,
+};
+
+static bool s_background_wait(void *context) {
+ struct aws_log_background_channel *impl = (struct aws_log_background_channel *)context;
+
+ /*
+ * Condition variable predicates are checked under mutex protection
+ */
+ return impl->finished || aws_array_list_length(&impl->pending_log_lines) > 0;
+}
+
+static void s_background_thread_writer(void *thread_data) {
+ (void)thread_data;
+
+ struct aws_log_channel *channel = (struct aws_log_channel *)thread_data;
+ AWS_ASSERT(channel->writer->vtable->write);
+
+ struct aws_log_background_channel *impl = (struct aws_log_background_channel *)channel->impl;
+
+ struct aws_array_list log_lines;
+
+ AWS_FATAL_ASSERT(aws_array_list_init_dynamic(&log_lines, channel->allocator, 10, sizeof(struct aws_string *)) == 0);
+
+ while (true) {
+ aws_mutex_lock(&impl->sync);
+ aws_condition_variable_wait_pred(&impl->pending_line_signal, &impl->sync, s_background_wait, impl);
+
+ size_t line_count = aws_array_list_length(&impl->pending_log_lines);
+ bool finished = impl->finished;
+
+ if (line_count == 0) {
+ aws_mutex_unlock(&impl->sync);
+ if (finished) {
+ break;
+ }
+ continue;
+ }
+
+ aws_array_list_swap_contents(&impl->pending_log_lines, &log_lines);
+ aws_mutex_unlock(&impl->sync);
+
+ /*
+ * Consider copying these into a page-sized stack buffer (string) and then making the write calls
+ * against it rather than the individual strings. Might be a savings when > 1 lines (cut down on
+ * write calls).
+ */
+ for (size_t i = 0; i < line_count; ++i) {
+ struct aws_string *log_line = NULL;
+ AWS_FATAL_ASSERT(aws_array_list_get_at(&log_lines, &log_line, i) == AWS_OP_SUCCESS);
+
+ (channel->writer->vtable->write)(channel->writer, log_line);
+
+ /*
+ * send is considered a transfer of ownership. write is not a transfer of ownership.
+ * So it's always the channel's responsibility to clean up all log lines that enter
+ * it as soon as they are no longer needed.
+ */
+ aws_string_destroy(log_line);
+ }
+
+ aws_array_list_clear(&log_lines);
+ }
+
+ aws_array_list_clean_up(&log_lines);
+}
+
+int aws_log_channel_init_background(
+ struct aws_log_channel *channel,
+ struct aws_allocator *allocator,
+ struct aws_log_writer *writer) {
+ struct aws_log_background_channel *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_log_background_channel));
+ if (impl == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ impl->finished = false;
+
+ if (aws_mutex_init(&impl->sync)) {
+ goto clean_up_sync_init_fail;
+ }
+
+ if (aws_array_list_init_dynamic(&impl->pending_log_lines, allocator, 10, sizeof(struct aws_string *))) {
+ goto clean_up_pending_log_lines_init_fail;
+ }
+
+ if (aws_condition_variable_init(&impl->pending_line_signal)) {
+ goto clean_up_pending_line_signal_init_fail;
+ }
+
+ if (aws_thread_init(&impl->background_thread, allocator)) {
+ goto clean_up_background_thread_init_fail;
+ }
+
+ channel->vtable = &s_background_channel_vtable;
+ channel->allocator = allocator;
+ channel->impl = impl;
+ channel->writer = writer;
+
+ /*
+ * Logging thread should need very little stack, but let's defer this to later
+ */
+ struct aws_thread_options thread_options = {.stack_size = 0};
+
+ if (aws_thread_launch(&impl->background_thread, s_background_thread_writer, channel, &thread_options) ==
+ AWS_OP_SUCCESS) {
+ return AWS_OP_SUCCESS;
+ }
+
+ aws_thread_clean_up(&impl->background_thread);
+
+clean_up_background_thread_init_fail:
+ aws_condition_variable_clean_up(&impl->pending_line_signal);
+
+clean_up_pending_line_signal_init_fail:
+ aws_array_list_clean_up(&impl->pending_log_lines);
+
+clean_up_pending_log_lines_init_fail:
+ aws_mutex_clean_up(&impl->sync);
+
+clean_up_sync_init_fail:
+ aws_mem_release(allocator, impl);
+
+ return AWS_OP_ERR;
+}
+
+void aws_log_channel_clean_up(struct aws_log_channel *channel) {
+ AWS_ASSERT(channel->vtable->clean_up);
+ (channel->vtable->clean_up)(channel);
+}
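
A hedged sketch of driving the background channel directly. It assumes the stdout writer from log_writer.c (aws_log_writer_init_stdout) and calls the channel's vtable send directly, which is normally the logger's job; the key point is the ownership rule spelled out in the comments above: send() consumes the aws_string, clean_up flushes pending lines and joins the background thread.

/* Hypothetical usage sketch; not part of the patch. */
#include <aws/common/log_channel.h>
#include <aws/common/log_writer.h>
#include <aws/common/string.h>

int channel_demo(struct aws_allocator *allocator) {
    struct aws_log_writer writer;
    if (aws_log_writer_init_stdout(&writer, allocator)) {
        return AWS_OP_ERR;
    }

    struct aws_log_channel channel;
    if (aws_log_channel_init_background(&channel, allocator, &writer)) {
        aws_log_writer_clean_up(&writer);
        return AWS_OP_ERR;
    }

    /* send() takes ownership of the line: the channel destroys it once the
     * background thread has handed it to the writer. */
    struct aws_string *line = aws_string_new_from_c_str(allocator, "hello from the background channel\n");
    channel.vtable->send(&channel, line);

    /* clean_up signals the background thread, drains pending lines, and joins. */
    aws_log_channel_clean_up(&channel);
    aws_log_writer_clean_up(&writer);
    return AWS_OP_SUCCESS;
}
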
diff --git a/contrib/restricted/aws/aws-c-common/source/log_formatter.c b/contrib/restricted/aws/aws-c-common/source/log_formatter.c
index 489cdf5ded..513a7f87b4 100644
--- a/contrib/restricted/aws/aws-c-common/source/log_formatter.c
+++ b/contrib/restricted/aws/aws-c-common/source/log_formatter.c
@@ -1,297 +1,297 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/log_formatter.h>
-
-#include <aws/common/date_time.h>
-#include <aws/common/string.h>
-#include <aws/common/thread.h>
-
-#include <inttypes.h>
-#include <stdarg.h>
-
-/*
- * Default formatter implementation
- */
-
-#if _MSC_VER
-# pragma warning(disable : 4204) /* non-constant aggregate initializer */
-#endif
-
-/* (max) strlen of "[<LogLevel>]" */
-#define LOG_LEVEL_PREFIX_PADDING 7
-
-/* (max) strlen of "[<ThreadId>]" */
-#define THREAD_ID_PREFIX_PADDING 22
-
-/* strlen of (user-content separator) " - " + "\n" + spaces between prefix fields + brackets around timestamp + 1 +
- subject_name padding */
-#define MISC_PADDING 15
-
-#define MAX_LOG_LINE_PREFIX_SIZE \
- (LOG_LEVEL_PREFIX_PADDING + THREAD_ID_PREFIX_PADDING + MISC_PADDING + AWS_DATE_TIME_STR_MAX_LEN)
-
-static size_t s_advance_and_clamp_index(size_t current_index, int amount, size_t maximum) {
- size_t next_index = current_index + amount;
- if (next_index > maximum) {
- next_index = maximum;
- }
-
- return next_index;
-}
-
-/* Thread-local string representation of current thread id */
-AWS_THREAD_LOCAL struct {
- bool is_valid;
- char repr[AWS_THREAD_ID_T_REPR_BUFSZ];
-} tl_logging_thread_id = {.is_valid = false};
-
-int aws_format_standard_log_line(struct aws_logging_standard_formatting_data *formatting_data, va_list args) {
- size_t current_index = 0;
-
- /*
- * Begin the log line with "[<Log Level>] ["
- */
- const char *level_string = NULL;
- if (aws_log_level_to_string(formatting_data->level, &level_string)) {
- return AWS_OP_ERR;
- }
-
- if (formatting_data->total_length == 0) {
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- /*
- * Use this length for all but the last write, so we guarantee room for the newline even if we get truncated
- */
- size_t fake_total_length = formatting_data->total_length - 1;
-
- int log_level_length = snprintf(formatting_data->log_line_buffer, fake_total_length, "[%s] [", level_string);
- if (log_level_length < 0) {
- return AWS_OP_ERR;
- }
-
- current_index = s_advance_and_clamp_index(current_index, log_level_length, fake_total_length);
-
- if (current_index < fake_total_length) {
- /*
- * Add the timestamp. To avoid copies and allocations, do some byte buffer tomfoolery.
- *
- * First, make a byte_buf that points to the current position in the output string
- */
- struct aws_byte_buf timestamp_buffer = {
- .allocator = formatting_data->allocator,
- .buffer = (uint8_t *)formatting_data->log_line_buffer + current_index,
- .capacity = fake_total_length - current_index,
- .len = 0,
- };
-
- /*
- * Output the current time to the byte_buf
- */
- struct aws_date_time current_time;
- aws_date_time_init_now(&current_time);
-
- int result = aws_date_time_to_utc_time_str(&current_time, formatting_data->date_format, &timestamp_buffer);
- if (result != AWS_OP_SUCCESS) {
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- current_index = s_advance_and_clamp_index(current_index, (int)timestamp_buffer.len, fake_total_length);
- }
-
- if (current_index < fake_total_length) {
- /*
- * Add thread id and user content separator (" - ")
- */
- if (!tl_logging_thread_id.is_valid) {
- aws_thread_id_t current_thread_id = aws_thread_current_thread_id();
- if (aws_thread_id_t_to_string(current_thread_id, tl_logging_thread_id.repr, AWS_THREAD_ID_T_REPR_BUFSZ)) {
- return AWS_OP_ERR;
- }
- tl_logging_thread_id.is_valid = true;
- }
- int thread_id_written = snprintf(
- formatting_data->log_line_buffer + current_index,
- fake_total_length - current_index,
- "] [%s] ",
- tl_logging_thread_id.repr);
- if (thread_id_written < 0) {
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
- current_index = s_advance_and_clamp_index(current_index, thread_id_written, fake_total_length);
- }
-
- if (current_index < fake_total_length) {
- /* output subject name */
- if (formatting_data->subject_name) {
- int subject_written = snprintf(
- formatting_data->log_line_buffer + current_index,
- fake_total_length - current_index,
- "[%s]",
- formatting_data->subject_name);
-
- if (subject_written < 0) {
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- current_index = s_advance_and_clamp_index(current_index, subject_written, fake_total_length);
- }
- }
-
- if (current_index < fake_total_length) {
- int separator_written =
- snprintf(formatting_data->log_line_buffer + current_index, fake_total_length - current_index, " - ");
- current_index = s_advance_and_clamp_index(current_index, separator_written, fake_total_length);
- }
-
- if (current_index < fake_total_length) {
- /*
- * Now write the actual data requested by the user
- */
-#ifdef _WIN32
- int written_count = vsnprintf_s(
- formatting_data->log_line_buffer + current_index,
- fake_total_length - current_index,
- _TRUNCATE,
- formatting_data->format,
- args);
-#else
- int written_count = vsnprintf(
- formatting_data->log_line_buffer + current_index,
- fake_total_length - current_index,
- formatting_data->format,
- args);
-#endif /* _WIN32 */
- if (written_count < 0) {
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- current_index = s_advance_and_clamp_index(current_index, written_count, fake_total_length);
- }
-
- /*
- * End with a newline.
- */
- int newline_written_count =
- snprintf(formatting_data->log_line_buffer + current_index, formatting_data->total_length - current_index, "\n");
- if (newline_written_count < 0) {
- return aws_raise_error(AWS_ERROR_UNKNOWN); /* we saved space, so this would be crazy */
- }
-
- formatting_data->amount_written = current_index + newline_written_count;
-
- return AWS_OP_SUCCESS;
-}
-
-struct aws_default_log_formatter_impl {
- enum aws_date_format date_format;
-};
-
-static int s_default_aws_log_formatter_format(
- struct aws_log_formatter *formatter,
- struct aws_string **formatted_output,
- enum aws_log_level level,
- aws_log_subject_t subject,
- const char *format,
- va_list args) {
-
- (void)subject;
-
- struct aws_default_log_formatter_impl *impl = formatter->impl;
-
- if (formatted_output == NULL) {
- return AWS_OP_ERR;
- }
-
- /*
- * Calculate how much room we'll need to build the full log line.
- * You cannot consume a va_list twice, so we have to copy it.
- */
- va_list tmp_args;
- va_copy(tmp_args, args);
-#ifdef _WIN32
- int required_length = _vscprintf(format, tmp_args) + 1;
-#else
- int required_length = vsnprintf(NULL, 0, format, tmp_args) + 1;
-#endif
- va_end(tmp_args);
-
- /*
- * Allocate enough room to hold the line. Then we'll (unsafely) do formatted IO directly into the aws_string
- * memory.
- */
- const char *subject_name = aws_log_subject_name(subject);
- int subject_name_len = 0;
-
- if (subject_name) {
- subject_name_len = (int)strlen(subject_name);
- }
-
- int total_length = required_length + MAX_LOG_LINE_PREFIX_SIZE + subject_name_len;
- struct aws_string *raw_string = aws_mem_calloc(formatter->allocator, 1, sizeof(struct aws_string) + total_length);
- if (raw_string == NULL) {
- goto error_clean_up;
- }
-
- struct aws_logging_standard_formatting_data format_data = {
- .log_line_buffer = (char *)raw_string->bytes,
- .total_length = total_length,
- .level = level,
- .subject_name = subject_name,
- .format = format,
- .date_format = impl->date_format,
- .allocator = formatter->allocator,
- .amount_written = 0,
- };
-
- if (aws_format_standard_log_line(&format_data, args)) {
- goto error_clean_up;
- }
-
- *(struct aws_allocator **)(&raw_string->allocator) = formatter->allocator;
- *(size_t *)(&raw_string->len) = format_data.amount_written;
-
- *formatted_output = raw_string;
-
- return AWS_OP_SUCCESS;
-
-error_clean_up:
-
- if (raw_string != NULL) {
- aws_mem_release(formatter->allocator, raw_string);
- }
-
- return AWS_OP_ERR;
-}
-
-static void s_default_aws_log_formatter_clean_up(struct aws_log_formatter *formatter) {
- aws_mem_release(formatter->allocator, formatter->impl);
-}
-
-static struct aws_log_formatter_vtable s_default_log_formatter_vtable = {
- .format = s_default_aws_log_formatter_format,
- .clean_up = s_default_aws_log_formatter_clean_up,
-};
-
-int aws_log_formatter_init_default(
- struct aws_log_formatter *formatter,
- struct aws_allocator *allocator,
- struct aws_log_formatter_standard_options *options) {
- struct aws_default_log_formatter_impl *impl =
- aws_mem_calloc(allocator, 1, sizeof(struct aws_default_log_formatter_impl));
- impl->date_format = options->date_format;
-
- formatter->vtable = &s_default_log_formatter_vtable;
- formatter->allocator = allocator;
- formatter->impl = impl;
-
- return AWS_OP_SUCCESS;
-}
-
-void aws_log_formatter_clean_up(struct aws_log_formatter *formatter) {
- AWS_ASSERT(formatter->vtable->clean_up);
- (formatter->vtable->clean_up)(formatter);
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/log_formatter.h>
+
+#include <aws/common/date_time.h>
+#include <aws/common/string.h>
+#include <aws/common/thread.h>
+
+#include <inttypes.h>
+#include <stdarg.h>
+
+/*
+ * Default formatter implementation
+ */
+
+#if _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+/* (max) strlen of "[<LogLevel>]" */
+#define LOG_LEVEL_PREFIX_PADDING 7
+
+/* (max) strlen of "[<ThreadId>]" */
+#define THREAD_ID_PREFIX_PADDING 22
+
+/* strlen of (user-content separator) " - " + "\n" + spaces between prefix fields + brackets around timestamp + 1 +
+ subject_name padding */
+#define MISC_PADDING 15
+
+#define MAX_LOG_LINE_PREFIX_SIZE \
+ (LOG_LEVEL_PREFIX_PADDING + THREAD_ID_PREFIX_PADDING + MISC_PADDING + AWS_DATE_TIME_STR_MAX_LEN)
+
+static size_t s_advance_and_clamp_index(size_t current_index, int amount, size_t maximum) {
+ size_t next_index = current_index + amount;
+ if (next_index > maximum) {
+ next_index = maximum;
+ }
+
+ return next_index;
+}
+
+/* Thread-local string representation of current thread id */
+AWS_THREAD_LOCAL struct {
+ bool is_valid;
+ char repr[AWS_THREAD_ID_T_REPR_BUFSZ];
+} tl_logging_thread_id = {.is_valid = false};
+
+int aws_format_standard_log_line(struct aws_logging_standard_formatting_data *formatting_data, va_list args) {
+ size_t current_index = 0;
+
+ /*
+ * Begin the log line with "[<Log Level>] ["
+ */
+ const char *level_string = NULL;
+ if (aws_log_level_to_string(formatting_data->level, &level_string)) {
+ return AWS_OP_ERR;
+ }
+
+ if (formatting_data->total_length == 0) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ /*
+ * Use this length for all but the last write, so we guarantee room for the newline even if we get truncated
+ */
+ size_t fake_total_length = formatting_data->total_length - 1;
+
+ int log_level_length = snprintf(formatting_data->log_line_buffer, fake_total_length, "[%s] [", level_string);
+ if (log_level_length < 0) {
+ return AWS_OP_ERR;
+ }
+
+ current_index = s_advance_and_clamp_index(current_index, log_level_length, fake_total_length);
+
+ if (current_index < fake_total_length) {
+ /*
+ * Add the timestamp. To avoid copies and allocations, do some byte buffer tomfoolery.
+ *
+ * First, make a byte_buf that points to the current position in the output string
+ */
+ struct aws_byte_buf timestamp_buffer = {
+ .allocator = formatting_data->allocator,
+ .buffer = (uint8_t *)formatting_data->log_line_buffer + current_index,
+ .capacity = fake_total_length - current_index,
+ .len = 0,
+ };
+
+ /*
+ * Output the current time to the byte_buf
+ */
+ struct aws_date_time current_time;
+ aws_date_time_init_now(&current_time);
+
+ int result = aws_date_time_to_utc_time_str(&current_time, formatting_data->date_format, &timestamp_buffer);
+ if (result != AWS_OP_SUCCESS) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ current_index = s_advance_and_clamp_index(current_index, (int)timestamp_buffer.len, fake_total_length);
+ }
+
+ if (current_index < fake_total_length) {
+ /*
+ * Add thread id and user content separator (" - ")
+ */
+ if (!tl_logging_thread_id.is_valid) {
+ aws_thread_id_t current_thread_id = aws_thread_current_thread_id();
+ if (aws_thread_id_t_to_string(current_thread_id, tl_logging_thread_id.repr, AWS_THREAD_ID_T_REPR_BUFSZ)) {
+ return AWS_OP_ERR;
+ }
+ tl_logging_thread_id.is_valid = true;
+ }
+ int thread_id_written = snprintf(
+ formatting_data->log_line_buffer + current_index,
+ fake_total_length - current_index,
+ "] [%s] ",
+ tl_logging_thread_id.repr);
+ if (thread_id_written < 0) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ current_index = s_advance_and_clamp_index(current_index, thread_id_written, fake_total_length);
+ }
+
+ if (current_index < fake_total_length) {
+ /* output subject name */
+ if (formatting_data->subject_name) {
+ int subject_written = snprintf(
+ formatting_data->log_line_buffer + current_index,
+ fake_total_length - current_index,
+ "[%s]",
+ formatting_data->subject_name);
+
+ if (subject_written < 0) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ current_index = s_advance_and_clamp_index(current_index, subject_written, fake_total_length);
+ }
+ }
+
+ if (current_index < fake_total_length) {
+ int separator_written =
+ snprintf(formatting_data->log_line_buffer + current_index, fake_total_length - current_index, " - ");
+ current_index = s_advance_and_clamp_index(current_index, separator_written, fake_total_length);
+ }
+
+ if (current_index < fake_total_length) {
+ /*
+ * Now write the actual data requested by the user
+ */
+#ifdef _WIN32
+ int written_count = vsnprintf_s(
+ formatting_data->log_line_buffer + current_index,
+ fake_total_length - current_index,
+ _TRUNCATE,
+ formatting_data->format,
+ args);
+#else
+ int written_count = vsnprintf(
+ formatting_data->log_line_buffer + current_index,
+ fake_total_length - current_index,
+ formatting_data->format,
+ args);
+#endif /* _WIN32 */
+ if (written_count < 0) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ current_index = s_advance_and_clamp_index(current_index, written_count, fake_total_length);
+ }
+
+ /*
+ * End with a newline.
+ */
+ int newline_written_count =
+ snprintf(formatting_data->log_line_buffer + current_index, formatting_data->total_length - current_index, "\n");
+ if (newline_written_count < 0) {
+ return aws_raise_error(AWS_ERROR_UNKNOWN); /* we saved space, so this would be crazy */
+ }
+
+ formatting_data->amount_written = current_index + newline_written_count;
+
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_default_log_formatter_impl {
+ enum aws_date_format date_format;
+};
+
+static int s_default_aws_log_formatter_format(
+ struct aws_log_formatter *formatter,
+ struct aws_string **formatted_output,
+ enum aws_log_level level,
+ aws_log_subject_t subject,
+ const char *format,
+ va_list args) {
+
+ (void)subject;
+
+ struct aws_default_log_formatter_impl *impl = formatter->impl;
+
+ if (formatted_output == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ /*
+ * Calculate how much room we'll need to build the full log line.
+ * You cannot consume a va_list twice, so we have to copy it.
+ */
+ va_list tmp_args;
+ va_copy(tmp_args, args);
+#ifdef _WIN32
+ int required_length = _vscprintf(format, tmp_args) + 1;
+#else
+ int required_length = vsnprintf(NULL, 0, format, tmp_args) + 1;
+#endif
+ va_end(tmp_args);
+
+ /*
+ * Allocate enough room to hold the line. Then we'll (unsafely) do formatted IO directly into the aws_string
+ * memory.
+ */
+ const char *subject_name = aws_log_subject_name(subject);
+ int subject_name_len = 0;
+
+ if (subject_name) {
+ subject_name_len = (int)strlen(subject_name);
+ }
+
+ int total_length = required_length + MAX_LOG_LINE_PREFIX_SIZE + subject_name_len;
+ struct aws_string *raw_string = aws_mem_calloc(formatter->allocator, 1, sizeof(struct aws_string) + total_length);
+ if (raw_string == NULL) {
+ goto error_clean_up;
+ }
+
+ struct aws_logging_standard_formatting_data format_data = {
+ .log_line_buffer = (char *)raw_string->bytes,
+ .total_length = total_length,
+ .level = level,
+ .subject_name = subject_name,
+ .format = format,
+ .date_format = impl->date_format,
+ .allocator = formatter->allocator,
+ .amount_written = 0,
+ };
+
+ if (aws_format_standard_log_line(&format_data, args)) {
+ goto error_clean_up;
+ }
+
+ *(struct aws_allocator **)(&raw_string->allocator) = formatter->allocator;
+ *(size_t *)(&raw_string->len) = format_data.amount_written;
+
+ *formatted_output = raw_string;
+
+ return AWS_OP_SUCCESS;
+
+error_clean_up:
+
+ if (raw_string != NULL) {
+ aws_mem_release(formatter->allocator, raw_string);
+ }
+
+ return AWS_OP_ERR;
+}
+
+static void s_default_aws_log_formatter_clean_up(struct aws_log_formatter *formatter) {
+ aws_mem_release(formatter->allocator, formatter->impl);
+}
+
+static struct aws_log_formatter_vtable s_default_log_formatter_vtable = {
+ .format = s_default_aws_log_formatter_format,
+ .clean_up = s_default_aws_log_formatter_clean_up,
+};
+
+int aws_log_formatter_init_default(
+ struct aws_log_formatter *formatter,
+ struct aws_allocator *allocator,
+ struct aws_log_formatter_standard_options *options) {
+ struct aws_default_log_formatter_impl *impl =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_default_log_formatter_impl));
+ impl->date_format = options->date_format;
+
+ formatter->vtable = &s_default_log_formatter_vtable;
+ formatter->allocator = allocator;
+ formatter->impl = impl;
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_log_formatter_clean_up(struct aws_log_formatter *formatter) {
+ AWS_ASSERT(formatter->vtable->clean_up);
+ (formatter->vtable->clean_up)(formatter);
+}
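
A minimal usage sketch for the default formatter restored above. The init/clean_up entry points, the standard options struct, and the vtable's format signature are taken from this file; the variadic wrapper s_format_one_line, the subject id 0, and aws_default_allocator() are illustrative assumptions, and real callers normally drive the formatter through a logger rather than directly.

#include <aws/common/date_time.h>
#include <aws/common/log_formatter.h>
#include <aws/common/logging.h>
#include <aws/common/string.h>

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical helper: the vtable's format() expects a va_list, so wrap it in a variadic call. */
static int s_format_one_line(
    struct aws_log_formatter *formatter,
    struct aws_string **output,
    enum aws_log_level level,
    const char *format,
    ...) {

    va_list args;
    va_start(args, format);
    int result = formatter->vtable->format(formatter, output, level, 0 /* subject */, format, args);
    va_end(args);
    return result;
}

int main(void) {
    struct aws_allocator *allocator = aws_default_allocator();

    struct aws_log_formatter_standard_options options = {.date_format = AWS_DATE_FORMAT_ISO_8601};
    struct aws_log_formatter formatter;
    if (aws_log_formatter_init_default(&formatter, allocator, &options)) {
        return 1;
    }

    struct aws_string *line = NULL;
    if (s_format_one_line(&formatter, &line, AWS_LL_INFO, "hello %s", "world") == AWS_OP_SUCCESS) {
        /* the formatted line already ends with '\n' */
        fwrite(line->bytes, 1, line->len, stdout);
        aws_string_destroy(line);
    }

    aws_log_formatter_clean_up(&formatter);
    return 0;
}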
diff --git a/contrib/restricted/aws/aws-c-common/source/log_writer.c b/contrib/restricted/aws/aws-c-common/source/log_writer.c
index 19b9b6f91a..7b31e406d1 100644
--- a/contrib/restricted/aws/aws-c-common/source/log_writer.c
+++ b/contrib/restricted/aws/aws-c-common/source/log_writer.c
@@ -1,117 +1,117 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/log_writer.h>
-
-#include <aws/common/string.h>
-
-#include <errno.h>
-#include <stdio.h>
-
-#ifdef _MSC_VER
-# pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */
-#endif /* _MSC_VER */
-
-/*
- * Basic log writer implementations - stdout, stderr, arbitrary file
- */
-
-struct aws_file_writer;
-
-struct aws_file_writer {
- FILE *log_file;
- bool close_file_on_cleanup;
-};
-
-static int s_aws_file_writer_write(struct aws_log_writer *writer, const struct aws_string *output) {
- struct aws_file_writer *impl = (struct aws_file_writer *)writer->impl;
-
- size_t length = output->len;
- if (fwrite(output->bytes, 1, length, impl->log_file) < length) {
- return aws_translate_and_raise_io_error(errno);
- }
-
- return AWS_OP_SUCCESS;
-}
-
-static void s_aws_file_writer_clean_up(struct aws_log_writer *writer) {
- struct aws_file_writer *impl = (struct aws_file_writer *)writer->impl;
-
- if (impl->close_file_on_cleanup) {
- fclose(impl->log_file);
- }
-
- aws_mem_release(writer->allocator, impl);
-}
-
-static struct aws_log_writer_vtable s_aws_file_writer_vtable = {
- .write = s_aws_file_writer_write,
- .clean_up = s_aws_file_writer_clean_up,
-};
-
-/*
- * Shared internal init implementation
- */
-static int s_aws_file_writer_init_internal(
- struct aws_log_writer *writer,
- struct aws_allocator *allocator,
- const char *file_name_to_open,
- FILE *currently_open_file) {
-
- /* One or the other should be set */
- if (!((file_name_to_open != NULL) ^ (currently_open_file != NULL))) {
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- /* Allocate and initialize the file writer */
- struct aws_file_writer *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_file_writer));
- if (impl == NULL) {
- return AWS_OP_ERR;
- }
-
- impl->log_file = NULL;
- impl->close_file_on_cleanup = false;
-
- /* Open file if name passed in */
- if (file_name_to_open != NULL) {
- impl->log_file = fopen(file_name_to_open, "a+");
- if (impl->log_file == NULL) {
- aws_mem_release(allocator, impl);
- return aws_translate_and_raise_io_error(errno);
- }
- impl->close_file_on_cleanup = true;
- } else {
- impl->log_file = currently_open_file;
- }
-
- writer->vtable = &s_aws_file_writer_vtable;
- writer->allocator = allocator;
- writer->impl = impl;
-
- return AWS_OP_SUCCESS;
-}
-
-/*
- * Public initialization interface
- */
-int aws_log_writer_init_stdout(struct aws_log_writer *writer, struct aws_allocator *allocator) {
- return s_aws_file_writer_init_internal(writer, allocator, NULL, stdout);
-}
-
-int aws_log_writer_init_stderr(struct aws_log_writer *writer, struct aws_allocator *allocator) {
- return s_aws_file_writer_init_internal(writer, allocator, NULL, stderr);
-}
-
-int aws_log_writer_init_file(
- struct aws_log_writer *writer,
- struct aws_allocator *allocator,
- struct aws_log_writer_file_options *options) {
- return s_aws_file_writer_init_internal(writer, allocator, options->filename, options->file);
-}
-
-void aws_log_writer_clean_up(struct aws_log_writer *writer) {
- AWS_ASSERT(writer->vtable->clean_up);
- (writer->vtable->clean_up)(writer);
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/log_writer.h>
+
+#include <aws/common/string.h>
+
+#include <errno.h>
+#include <stdio.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */
+#endif /* _MSC_VER */
+
+/*
+ * Basic log writer implementations - stdout, stderr, arbitrary file
+ */
+
+struct aws_file_writer;
+
+struct aws_file_writer {
+ FILE *log_file;
+ bool close_file_on_cleanup;
+};
+
+static int s_aws_file_writer_write(struct aws_log_writer *writer, const struct aws_string *output) {
+ struct aws_file_writer *impl = (struct aws_file_writer *)writer->impl;
+
+ size_t length = output->len;
+ if (fwrite(output->bytes, 1, length, impl->log_file) < length) {
+ return aws_translate_and_raise_io_error(errno);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_aws_file_writer_clean_up(struct aws_log_writer *writer) {
+ struct aws_file_writer *impl = (struct aws_file_writer *)writer->impl;
+
+ if (impl->close_file_on_cleanup) {
+ fclose(impl->log_file);
+ }
+
+ aws_mem_release(writer->allocator, impl);
+}
+
+static struct aws_log_writer_vtable s_aws_file_writer_vtable = {
+ .write = s_aws_file_writer_write,
+ .clean_up = s_aws_file_writer_clean_up,
+};
+
+/*
+ * Shared internal init implementation
+ */
+static int s_aws_file_writer_init_internal(
+ struct aws_log_writer *writer,
+ struct aws_allocator *allocator,
+ const char *file_name_to_open,
+ FILE *currently_open_file) {
+
+ /* One or the other should be set */
+ if (!((file_name_to_open != NULL) ^ (currently_open_file != NULL))) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ /* Allocate and initialize the file writer */
+ struct aws_file_writer *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_file_writer));
+ if (impl == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ impl->log_file = NULL;
+ impl->close_file_on_cleanup = false;
+
+ /* Open file if name passed in */
+ if (file_name_to_open != NULL) {
+ impl->log_file = fopen(file_name_to_open, "a+");
+ if (impl->log_file == NULL) {
+ aws_mem_release(allocator, impl);
+ return aws_translate_and_raise_io_error(errno);
+ }
+ impl->close_file_on_cleanup = true;
+ } else {
+ impl->log_file = currently_open_file;
+ }
+
+ writer->vtable = &s_aws_file_writer_vtable;
+ writer->allocator = allocator;
+ writer->impl = impl;
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Public initialization interface
+ */
+int aws_log_writer_init_stdout(struct aws_log_writer *writer, struct aws_allocator *allocator) {
+ return s_aws_file_writer_init_internal(writer, allocator, NULL, stdout);
+}
+
+int aws_log_writer_init_stderr(struct aws_log_writer *writer, struct aws_allocator *allocator) {
+ return s_aws_file_writer_init_internal(writer, allocator, NULL, stderr);
+}
+
+int aws_log_writer_init_file(
+ struct aws_log_writer *writer,
+ struct aws_allocator *allocator,
+ struct aws_log_writer_file_options *options) {
+ return s_aws_file_writer_init_internal(writer, allocator, options->filename, options->file);
+}
+
+void aws_log_writer_clean_up(struct aws_log_writer *writer) {
+ AWS_ASSERT(writer->vtable->clean_up);
+ (writer->vtable->clean_up)(writer);
+}
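
A short sketch of driving the file writer above against stdout. The init and clean_up entry points and the vtable's write signature come from this file; aws_string_new_from_c_str() and aws_default_allocator() are assumed from elsewhere in aws-c-common.

#include <aws/common/log_writer.h>
#include <aws/common/string.h>

int main(void) {
    struct aws_allocator *allocator = aws_default_allocator();

    struct aws_log_writer writer;
    if (aws_log_writer_init_stdout(&writer, allocator)) {
        return 1;
    }

    /* the writer consumes aws_string payloads; a file-backed writer would use
     * aws_log_writer_init_file() with aws_log_writer_file_options instead */
    struct aws_string *line = aws_string_new_from_c_str(allocator, "writer smoke test\n");
    int result = writer.vtable->write(&writer, line);
    aws_string_destroy(line);

    aws_log_writer_clean_up(&writer);
    return result;
}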
diff --git a/contrib/restricted/aws/aws-c-common/source/logging.c b/contrib/restricted/aws/aws-c-common/source/logging.c
index 3b35c690d5..1b96e1cc6b 100644
--- a/contrib/restricted/aws/aws-c-common/source/logging.c
+++ b/contrib/restricted/aws/aws-c-common/source/logging.c
@@ -1,525 +1,525 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/logging.h>
-
-#include <aws/common/string.h>
-
-#include <aws/common/log_channel.h>
-#include <aws/common/log_formatter.h>
-#include <aws/common/log_writer.h>
-#include <aws/common/mutex.h>
-
-#include <errno.h>
-#include <stdarg.h>
-
-#if _MSC_VER
-# pragma warning(disable : 4204) /* non-constant aggregate initializer */
-# pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */
-#endif
-
-/*
- * Null logger implementation
- */
-static enum aws_log_level s_null_logger_get_log_level(struct aws_logger *logger, aws_log_subject_t subject) {
- (void)logger;
- (void)subject;
-
- return AWS_LL_NONE;
-}
-
-static int s_null_logger_log(
- struct aws_logger *logger,
- enum aws_log_level log_level,
- aws_log_subject_t subject,
- const char *format,
- ...) {
-
- (void)logger;
- (void)log_level;
- (void)subject;
- (void)format;
-
- return AWS_OP_SUCCESS;
-}
-
-static void s_null_logger_clean_up(struct aws_logger *logger) {
- (void)logger;
-}
-
-static struct aws_logger_vtable s_null_vtable = {
- .get_log_level = s_null_logger_get_log_level,
- .log = s_null_logger_log,
- .clean_up = s_null_logger_clean_up,
-};
-
-static struct aws_logger s_null_logger = {.vtable = &s_null_vtable, .allocator = NULL, .p_impl = NULL};
-
-/*
- * Pipeline logger implementation
- */
-static void s_aws_logger_pipeline_owned_clean_up(struct aws_logger *logger) {
- struct aws_logger_pipeline *impl = logger->p_impl;
-
- AWS_ASSERT(impl->channel->vtable->clean_up != NULL);
- (impl->channel->vtable->clean_up)(impl->channel);
-
- AWS_ASSERT(impl->formatter->vtable->clean_up != NULL);
- (impl->formatter->vtable->clean_up)(impl->formatter);
-
- AWS_ASSERT(impl->writer->vtable->clean_up != NULL);
- (impl->writer->vtable->clean_up)(impl->writer);
-
- aws_mem_release(impl->allocator, impl->channel);
- aws_mem_release(impl->allocator, impl->formatter);
- aws_mem_release(impl->allocator, impl->writer);
-
- aws_mem_release(impl->allocator, impl);
-}
-
-/*
- * Pipeline logger implementation
- */
-static int s_aws_logger_pipeline_log(
- struct aws_logger *logger,
- enum aws_log_level log_level,
- aws_log_subject_t subject,
- const char *format,
- ...) {
- va_list format_args;
- va_start(format_args, format);
-
- struct aws_logger_pipeline *impl = logger->p_impl;
- struct aws_string *output = NULL;
-
- AWS_ASSERT(impl->formatter->vtable->format != NULL);
- int result = (impl->formatter->vtable->format)(impl->formatter, &output, log_level, subject, format, format_args);
-
- va_end(format_args);
-
- if (result != AWS_OP_SUCCESS || output == NULL) {
- return AWS_OP_ERR;
- }
-
- AWS_ASSERT(impl->channel->vtable->send != NULL);
- if ((impl->channel->vtable->send)(impl->channel, output)) {
- /*
- * failure to send implies failure to transfer ownership
- */
- aws_string_destroy(output);
- return AWS_OP_ERR;
- }
-
- return AWS_OP_SUCCESS;
-}
-
-static enum aws_log_level s_aws_logger_pipeline_get_log_level(struct aws_logger *logger, aws_log_subject_t subject) {
- (void)subject;
-
- struct aws_logger_pipeline *impl = logger->p_impl;
-
- return impl->level;
-}
-
-struct aws_logger_vtable g_pipeline_logger_owned_vtable = {
- .get_log_level = s_aws_logger_pipeline_get_log_level,
- .log = s_aws_logger_pipeline_log,
- .clean_up = s_aws_logger_pipeline_owned_clean_up,
-};
-
-int aws_logger_init_standard(
- struct aws_logger *logger,
- struct aws_allocator *allocator,
- struct aws_logger_standard_options *options) {
-
-#ifdef ANDROID
- (void)options;
- extern int aws_logger_init_logcat(
- struct aws_logger *, struct aws_allocator *, struct aws_logger_standard_options *);
- return aws_logger_init_logcat(logger, allocator, options);
-#endif
-
- struct aws_logger_pipeline *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_logger_pipeline));
- if (impl == NULL) {
- return AWS_OP_ERR;
- }
-
- struct aws_log_writer *writer = aws_mem_acquire(allocator, sizeof(struct aws_log_writer));
- if (writer == NULL) {
- goto on_allocate_writer_failure;
- }
-
- struct aws_log_writer_file_options file_writer_options = {
- .filename = options->filename,
- .file = options->file,
- };
-
- if (aws_log_writer_init_file(writer, allocator, &file_writer_options)) {
- goto on_init_writer_failure;
- }
-
- struct aws_log_formatter *formatter = aws_mem_acquire(allocator, sizeof(struct aws_log_formatter));
- if (formatter == NULL) {
- goto on_allocate_formatter_failure;
- }
-
- struct aws_log_formatter_standard_options formatter_options = {.date_format = AWS_DATE_FORMAT_ISO_8601};
-
- if (aws_log_formatter_init_default(formatter, allocator, &formatter_options)) {
- goto on_init_formatter_failure;
- }
-
- struct aws_log_channel *channel = aws_mem_acquire(allocator, sizeof(struct aws_log_channel));
- if (channel == NULL) {
- goto on_allocate_channel_failure;
- }
-
- if (aws_log_channel_init_background(channel, allocator, writer) == AWS_OP_SUCCESS) {
- impl->formatter = formatter;
- impl->channel = channel;
- impl->writer = writer;
- impl->allocator = allocator;
- impl->level = options->level;
-
- logger->vtable = &g_pipeline_logger_owned_vtable;
- logger->allocator = allocator;
- logger->p_impl = impl;
-
- return AWS_OP_SUCCESS;
- }
-
- aws_mem_release(allocator, channel);
-
-on_allocate_channel_failure:
- aws_log_formatter_clean_up(formatter);
-
-on_init_formatter_failure:
- aws_mem_release(allocator, formatter);
-
-on_allocate_formatter_failure:
- aws_log_writer_clean_up(writer);
-
-on_init_writer_failure:
- aws_mem_release(allocator, writer);
-
-on_allocate_writer_failure:
- aws_mem_release(allocator, impl);
-
- return AWS_OP_ERR;
-}
-
-/*
- * Pipeline logger implementation where all the components are externally owned. No clean up
- * is done on the components. Useful for tests where components are on the stack and often mocked.
- */
-static void s_aws_pipeline_logger_unowned_clean_up(struct aws_logger *logger) {
- struct aws_logger_pipeline *impl = (struct aws_logger_pipeline *)logger->p_impl;
-
- aws_mem_release(impl->allocator, impl);
-}
-
-static struct aws_logger_vtable s_pipeline_logger_unowned_vtable = {
- .get_log_level = s_aws_logger_pipeline_get_log_level,
- .log = s_aws_logger_pipeline_log,
- .clean_up = s_aws_pipeline_logger_unowned_clean_up,
-};
-
-int aws_logger_init_from_external(
- struct aws_logger *logger,
- struct aws_allocator *allocator,
- struct aws_log_formatter *formatter,
- struct aws_log_channel *channel,
- struct aws_log_writer *writer,
- enum aws_log_level level) {
-
- struct aws_logger_pipeline *impl = aws_mem_acquire(allocator, sizeof(struct aws_logger_pipeline));
-
- if (impl == NULL) {
- return AWS_OP_ERR;
- }
-
- impl->formatter = formatter;
- impl->channel = channel;
- impl->writer = writer;
- impl->allocator = allocator;
- impl->level = level;
-
- logger->vtable = &s_pipeline_logger_unowned_vtable;
- logger->allocator = allocator;
- logger->p_impl = impl;
-
- return AWS_OP_SUCCESS;
-}
-
-/*
- * Global API
- */
-static struct aws_logger *s_root_logger_ptr = &s_null_logger;
-
-void aws_logger_set(struct aws_logger *logger) {
- if (logger != NULL) {
- s_root_logger_ptr = logger;
- } else {
- s_root_logger_ptr = &s_null_logger;
- }
-}
-
-struct aws_logger *aws_logger_get(void) {
- return s_root_logger_ptr;
-}
-
-void aws_logger_clean_up(struct aws_logger *logger) {
- AWS_ASSERT(logger->vtable->clean_up != NULL);
-
- logger->vtable->clean_up(logger);
-}
-
-static const char *s_log_level_strings[AWS_LL_COUNT] = {"NONE", "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"};
-
-int aws_log_level_to_string(enum aws_log_level log_level, const char **level_string) {
- AWS_ERROR_PRECONDITION(log_level < AWS_LL_COUNT);
-
- if (level_string != NULL) {
- *level_string = s_log_level_strings[log_level];
- }
-
- return AWS_OP_SUCCESS;
-}
-
-int aws_string_to_log_level(const char *level_string, enum aws_log_level *log_level) {
- if (level_string != NULL && log_level != NULL) {
- size_t level_length = strlen(level_string);
- for (int i = 0; i < AWS_LL_COUNT; ++i) {
- if (aws_array_eq_c_str_ignore_case(level_string, level_length, s_log_level_strings[i])) {
- *log_level = i;
- return AWS_OP_SUCCESS;
- }
- }
- }
-
- aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- return AWS_OP_ERR;
-}
-
-int aws_thread_id_t_to_string(aws_thread_id_t thread_id, char *buffer, size_t bufsz) {
- AWS_ERROR_PRECONDITION(AWS_THREAD_ID_T_REPR_BUFSZ == bufsz);
- AWS_ERROR_PRECONDITION(buffer && AWS_MEM_IS_WRITABLE(buffer, bufsz));
- size_t current_index = 0;
- unsigned char *bytes = (unsigned char *)&thread_id;
- for (size_t i = sizeof(aws_thread_id_t); i != 0; --i) {
- unsigned char c = bytes[i - 1];
- int written = snprintf(buffer + current_index, bufsz - current_index, "%02x", c);
- if (written < 0) {
- return AWS_OP_ERR;
- }
- current_index += written;
- if (bufsz <= current_index) {
- return AWS_OP_ERR;
- }
- }
- return AWS_OP_SUCCESS;
-}
-
-#define AWS_LOG_SUBJECT_SPACE_MASK (AWS_LOG_SUBJECT_STRIDE - 1)
-
-static const uint32_t S_MAX_LOG_SUBJECT = AWS_LOG_SUBJECT_STRIDE * AWS_PACKAGE_SLOTS - 1;
-
-static const struct aws_log_subject_info_list *volatile s_log_subject_slots[AWS_PACKAGE_SLOTS] = {0};
-
-static const struct aws_log_subject_info *s_get_log_subject_info_by_id(aws_log_subject_t subject) {
- if (subject > S_MAX_LOG_SUBJECT) {
- return NULL;
- }
-
- uint32_t slot_index = subject >> AWS_LOG_SUBJECT_STRIDE_BITS;
- uint32_t subject_index = subject & AWS_LOG_SUBJECT_SPACE_MASK;
-
- const struct aws_log_subject_info_list *subject_slot = s_log_subject_slots[slot_index];
-
- if (!subject_slot || subject_index >= subject_slot->count) {
- return NULL;
- }
-
- return &subject_slot->subject_list[subject_index];
-}
-
-const char *aws_log_subject_name(aws_log_subject_t subject) {
- const struct aws_log_subject_info *subject_info = s_get_log_subject_info_by_id(subject);
-
- if (subject_info != NULL) {
- return subject_info->subject_name;
- }
-
- return "Unknown";
-}
-
-void aws_register_log_subject_info_list(struct aws_log_subject_info_list *log_subject_list) {
- /*
- * We're not so worried about these asserts being removed in an NDEBUG build
- * - we'll either segfault immediately (for the first two) or for the count
- * assert, the registration will be ineffective.
- */
- AWS_FATAL_ASSERT(log_subject_list);
- AWS_FATAL_ASSERT(log_subject_list->subject_list);
- AWS_FATAL_ASSERT(log_subject_list->count);
-
- const uint32_t min_range = log_subject_list->subject_list[0].subject_id;
- const uint32_t slot_index = min_range >> AWS_LOG_SUBJECT_STRIDE_BITS;
-
- if (slot_index >= AWS_PACKAGE_SLOTS) {
- /* This is an NDEBUG build apparently. Kill the process rather than
- * corrupting heap. */
- fprintf(stderr, "Bad log subject slot index 0x%016x\n", slot_index);
- abort();
- }
-
- s_log_subject_slots[slot_index] = log_subject_list;
-}
-
-void aws_unregister_log_subject_info_list(struct aws_log_subject_info_list *log_subject_list) {
- /*
- * We're not so worried about these asserts being removed in an NDEBUG build
- * - we'll either segfault immediately (for the first two) or for the count
- * assert, the registration will be ineffective.
- */
- AWS_FATAL_ASSERT(log_subject_list);
- AWS_FATAL_ASSERT(log_subject_list->subject_list);
- AWS_FATAL_ASSERT(log_subject_list->count);
-
- const uint32_t min_range = log_subject_list->subject_list[0].subject_id;
- const uint32_t slot_index = min_range >> AWS_LOG_SUBJECT_STRIDE_BITS;
-
- if (slot_index >= AWS_PACKAGE_SLOTS) {
- /* This is an NDEBUG build apparently. Kill the process rather than
- * corrupting heap. */
- fprintf(stderr, "Bad log subject slot index 0x%016x\n", slot_index);
- AWS_FATAL_ASSERT(false);
- }
-
- s_log_subject_slots[slot_index] = NULL;
-}
-
-/*
- * no alloc implementation
- */
-struct aws_logger_noalloc {
- enum aws_log_level level;
- FILE *file;
- bool should_close;
- struct aws_mutex lock;
-};
-
-static enum aws_log_level s_noalloc_stderr_logger_get_log_level(struct aws_logger *logger, aws_log_subject_t subject) {
- (void)subject;
-
- struct aws_logger_noalloc *impl = logger->p_impl;
- return impl->level;
-}
-
-#define MAXIMUM_NO_ALLOC_LOG_LINE_SIZE 8192
-
-static int s_noalloc_stderr_logger_log(
- struct aws_logger *logger,
- enum aws_log_level log_level,
- aws_log_subject_t subject,
- const char *format,
- ...) {
-
- char format_buffer[MAXIMUM_NO_ALLOC_LOG_LINE_SIZE];
-
- va_list format_args;
- va_start(format_args, format);
-
-#if _MSC_VER
-# pragma warning(push)
-# pragma warning(disable : 4221) /* allow struct member to reference format_buffer */
-#endif
-
- struct aws_logging_standard_formatting_data format_data = {
- .log_line_buffer = format_buffer,
- .total_length = MAXIMUM_NO_ALLOC_LOG_LINE_SIZE,
- .level = log_level,
- .subject_name = aws_log_subject_name(subject),
- .format = format,
- .date_format = AWS_DATE_FORMAT_ISO_8601,
- .allocator = logger->allocator,
- .amount_written = 0,
- };
-
-#if _MSC_VER
-# pragma warning(pop) /* disallow struct member to reference local value */
-#endif
-
- int result = aws_format_standard_log_line(&format_data, format_args);
-
- va_end(format_args);
-
- if (result == AWS_OP_ERR) {
- return AWS_OP_ERR;
- }
-
- struct aws_logger_noalloc *impl = logger->p_impl;
-
- aws_mutex_lock(&impl->lock);
-
- if (fwrite(format_buffer, 1, format_data.amount_written, impl->file) < format_data.amount_written) {
- return aws_translate_and_raise_io_error(errno);
- }
-
- aws_mutex_unlock(&impl->lock);
-
- return AWS_OP_SUCCESS;
-}
-
-static void s_noalloc_stderr_logger_clean_up(struct aws_logger *logger) {
- if (logger == NULL) {
- return;
- }
-
- struct aws_logger_noalloc *impl = logger->p_impl;
- if (impl->should_close) {
- fclose(impl->file);
- }
-
- aws_mutex_clean_up(&impl->lock);
-
- aws_mem_release(logger->allocator, impl);
- AWS_ZERO_STRUCT(*logger);
-}
-
-static struct aws_logger_vtable s_noalloc_stderr_vtable = {
- .get_log_level = s_noalloc_stderr_logger_get_log_level,
- .log = s_noalloc_stderr_logger_log,
- .clean_up = s_noalloc_stderr_logger_clean_up,
-};
-
-int aws_logger_init_noalloc(
- struct aws_logger *logger,
- struct aws_allocator *allocator,
- struct aws_logger_standard_options *options) {
-
- struct aws_logger_noalloc *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_logger_noalloc));
-
- if (impl == NULL) {
- return AWS_OP_ERR;
- }
-
- impl->level = options->level;
- if (options->file != NULL) {
- impl->file = options->file;
- impl->should_close = false;
-    } else { /* no open handle supplied; open the named file ourselves */
- impl->file = fopen(options->filename, "w");
- impl->should_close = true;
- }
-
- aws_mutex_init(&impl->lock);
-
- logger->vtable = &s_noalloc_stderr_vtable;
- logger->allocator = allocator;
- logger->p_impl = impl;
-
- return AWS_OP_SUCCESS;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/logging.h>
+
+#include <aws/common/string.h>
+
+#include <aws/common/log_channel.h>
+#include <aws/common/log_formatter.h>
+#include <aws/common/log_writer.h>
+#include <aws/common/mutex.h>
+
+#include <errno.h>
+#include <stdarg.h>
+
+#if _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+# pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */
+#endif
+
+/*
+ * Null logger implementation
+ */
+static enum aws_log_level s_null_logger_get_log_level(struct aws_logger *logger, aws_log_subject_t subject) {
+ (void)logger;
+ (void)subject;
+
+ return AWS_LL_NONE;
+}
+
+static int s_null_logger_log(
+ struct aws_logger *logger,
+ enum aws_log_level log_level,
+ aws_log_subject_t subject,
+ const char *format,
+ ...) {
+
+ (void)logger;
+ (void)log_level;
+ (void)subject;
+ (void)format;
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_null_logger_clean_up(struct aws_logger *logger) {
+ (void)logger;
+}
+
+static struct aws_logger_vtable s_null_vtable = {
+ .get_log_level = s_null_logger_get_log_level,
+ .log = s_null_logger_log,
+ .clean_up = s_null_logger_clean_up,
+};
+
+static struct aws_logger s_null_logger = {.vtable = &s_null_vtable, .allocator = NULL, .p_impl = NULL};
+
+/*
+ * Pipeline logger implementation
+ */
+static void s_aws_logger_pipeline_owned_clean_up(struct aws_logger *logger) {
+ struct aws_logger_pipeline *impl = logger->p_impl;
+
+ AWS_ASSERT(impl->channel->vtable->clean_up != NULL);
+ (impl->channel->vtable->clean_up)(impl->channel);
+
+ AWS_ASSERT(impl->formatter->vtable->clean_up != NULL);
+ (impl->formatter->vtable->clean_up)(impl->formatter);
+
+ AWS_ASSERT(impl->writer->vtable->clean_up != NULL);
+ (impl->writer->vtable->clean_up)(impl->writer);
+
+ aws_mem_release(impl->allocator, impl->channel);
+ aws_mem_release(impl->allocator, impl->formatter);
+ aws_mem_release(impl->allocator, impl->writer);
+
+ aws_mem_release(impl->allocator, impl);
+}
+
+/*
+ * Pipeline logger implementation
+ */
+static int s_aws_logger_pipeline_log(
+ struct aws_logger *logger,
+ enum aws_log_level log_level,
+ aws_log_subject_t subject,
+ const char *format,
+ ...) {
+ va_list format_args;
+ va_start(format_args, format);
+
+ struct aws_logger_pipeline *impl = logger->p_impl;
+ struct aws_string *output = NULL;
+
+ AWS_ASSERT(impl->formatter->vtable->format != NULL);
+ int result = (impl->formatter->vtable->format)(impl->formatter, &output, log_level, subject, format, format_args);
+
+ va_end(format_args);
+
+ if (result != AWS_OP_SUCCESS || output == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ AWS_ASSERT(impl->channel->vtable->send != NULL);
+ if ((impl->channel->vtable->send)(impl->channel, output)) {
+ /*
+ * failure to send implies failure to transfer ownership
+ */
+ aws_string_destroy(output);
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static enum aws_log_level s_aws_logger_pipeline_get_log_level(struct aws_logger *logger, aws_log_subject_t subject) {
+ (void)subject;
+
+ struct aws_logger_pipeline *impl = logger->p_impl;
+
+ return impl->level;
+}
+
+struct aws_logger_vtable g_pipeline_logger_owned_vtable = {
+ .get_log_level = s_aws_logger_pipeline_get_log_level,
+ .log = s_aws_logger_pipeline_log,
+ .clean_up = s_aws_logger_pipeline_owned_clean_up,
+};
+
+int aws_logger_init_standard(
+ struct aws_logger *logger,
+ struct aws_allocator *allocator,
+ struct aws_logger_standard_options *options) {
+
+#ifdef ANDROID
+ (void)options;
+ extern int aws_logger_init_logcat(
+ struct aws_logger *, struct aws_allocator *, struct aws_logger_standard_options *);
+ return aws_logger_init_logcat(logger, allocator, options);
+#endif
+
+ struct aws_logger_pipeline *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_logger_pipeline));
+ if (impl == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_log_writer *writer = aws_mem_acquire(allocator, sizeof(struct aws_log_writer));
+ if (writer == NULL) {
+ goto on_allocate_writer_failure;
+ }
+
+ struct aws_log_writer_file_options file_writer_options = {
+ .filename = options->filename,
+ .file = options->file,
+ };
+
+ if (aws_log_writer_init_file(writer, allocator, &file_writer_options)) {
+ goto on_init_writer_failure;
+ }
+
+ struct aws_log_formatter *formatter = aws_mem_acquire(allocator, sizeof(struct aws_log_formatter));
+ if (formatter == NULL) {
+ goto on_allocate_formatter_failure;
+ }
+
+ struct aws_log_formatter_standard_options formatter_options = {.date_format = AWS_DATE_FORMAT_ISO_8601};
+
+ if (aws_log_formatter_init_default(formatter, allocator, &formatter_options)) {
+ goto on_init_formatter_failure;
+ }
+
+ struct aws_log_channel *channel = aws_mem_acquire(allocator, sizeof(struct aws_log_channel));
+ if (channel == NULL) {
+ goto on_allocate_channel_failure;
+ }
+
+ if (aws_log_channel_init_background(channel, allocator, writer) == AWS_OP_SUCCESS) {
+ impl->formatter = formatter;
+ impl->channel = channel;
+ impl->writer = writer;
+ impl->allocator = allocator;
+ impl->level = options->level;
+
+ logger->vtable = &g_pipeline_logger_owned_vtable;
+ logger->allocator = allocator;
+ logger->p_impl = impl;
+
+ return AWS_OP_SUCCESS;
+ }
+
+ aws_mem_release(allocator, channel);
+
+on_allocate_channel_failure:
+ aws_log_formatter_clean_up(formatter);
+
+on_init_formatter_failure:
+ aws_mem_release(allocator, formatter);
+
+on_allocate_formatter_failure:
+ aws_log_writer_clean_up(writer);
+
+on_init_writer_failure:
+ aws_mem_release(allocator, writer);
+
+on_allocate_writer_failure:
+ aws_mem_release(allocator, impl);
+
+ return AWS_OP_ERR;
+}
+
+/*
+ * Pipeline logger implementation where all the components are externally owned. No clean up
+ * is done on the components. Useful for tests where components are on the stack and often mocked.
+ */
+static void s_aws_pipeline_logger_unowned_clean_up(struct aws_logger *logger) {
+ struct aws_logger_pipeline *impl = (struct aws_logger_pipeline *)logger->p_impl;
+
+ aws_mem_release(impl->allocator, impl);
+}
+
+static struct aws_logger_vtable s_pipeline_logger_unowned_vtable = {
+ .get_log_level = s_aws_logger_pipeline_get_log_level,
+ .log = s_aws_logger_pipeline_log,
+ .clean_up = s_aws_pipeline_logger_unowned_clean_up,
+};
+
+int aws_logger_init_from_external(
+ struct aws_logger *logger,
+ struct aws_allocator *allocator,
+ struct aws_log_formatter *formatter,
+ struct aws_log_channel *channel,
+ struct aws_log_writer *writer,
+ enum aws_log_level level) {
+
+ struct aws_logger_pipeline *impl = aws_mem_acquire(allocator, sizeof(struct aws_logger_pipeline));
+
+ if (impl == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ impl->formatter = formatter;
+ impl->channel = channel;
+ impl->writer = writer;
+ impl->allocator = allocator;
+ impl->level = level;
+
+ logger->vtable = &s_pipeline_logger_unowned_vtable;
+ logger->allocator = allocator;
+ logger->p_impl = impl;
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Global API
+ */
+static struct aws_logger *s_root_logger_ptr = &s_null_logger;
+
+void aws_logger_set(struct aws_logger *logger) {
+ if (logger != NULL) {
+ s_root_logger_ptr = logger;
+ } else {
+ s_root_logger_ptr = &s_null_logger;
+ }
+}
+
+struct aws_logger *aws_logger_get(void) {
+ return s_root_logger_ptr;
+}
+
+void aws_logger_clean_up(struct aws_logger *logger) {
+ AWS_ASSERT(logger->vtable->clean_up != NULL);
+
+ logger->vtable->clean_up(logger);
+}
+
+static const char *s_log_level_strings[AWS_LL_COUNT] = {"NONE", "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"};
+
+int aws_log_level_to_string(enum aws_log_level log_level, const char **level_string) {
+ AWS_ERROR_PRECONDITION(log_level < AWS_LL_COUNT);
+
+ if (level_string != NULL) {
+ *level_string = s_log_level_strings[log_level];
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_string_to_log_level(const char *level_string, enum aws_log_level *log_level) {
+ if (level_string != NULL && log_level != NULL) {
+ size_t level_length = strlen(level_string);
+ for (int i = 0; i < AWS_LL_COUNT; ++i) {
+ if (aws_array_eq_c_str_ignore_case(level_string, level_length, s_log_level_strings[i])) {
+ *log_level = i;
+ return AWS_OP_SUCCESS;
+ }
+ }
+ }
+
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return AWS_OP_ERR;
+}
+
+int aws_thread_id_t_to_string(aws_thread_id_t thread_id, char *buffer, size_t bufsz) {
+ AWS_ERROR_PRECONDITION(AWS_THREAD_ID_T_REPR_BUFSZ == bufsz);
+ AWS_ERROR_PRECONDITION(buffer && AWS_MEM_IS_WRITABLE(buffer, bufsz));
+ size_t current_index = 0;
+ unsigned char *bytes = (unsigned char *)&thread_id;
+ for (size_t i = sizeof(aws_thread_id_t); i != 0; --i) {
+ unsigned char c = bytes[i - 1];
+ int written = snprintf(buffer + current_index, bufsz - current_index, "%02x", c);
+ if (written < 0) {
+ return AWS_OP_ERR;
+ }
+ current_index += written;
+ if (bufsz <= current_index) {
+ return AWS_OP_ERR;
+ }
+ }
+ return AWS_OP_SUCCESS;
+}
+
+#define AWS_LOG_SUBJECT_SPACE_MASK (AWS_LOG_SUBJECT_STRIDE - 1)
+
+static const uint32_t S_MAX_LOG_SUBJECT = AWS_LOG_SUBJECT_STRIDE * AWS_PACKAGE_SLOTS - 1;
+
+static const struct aws_log_subject_info_list *volatile s_log_subject_slots[AWS_PACKAGE_SLOTS] = {0};
+
+static const struct aws_log_subject_info *s_get_log_subject_info_by_id(aws_log_subject_t subject) {
+ if (subject > S_MAX_LOG_SUBJECT) {
+ return NULL;
+ }
+
+ uint32_t slot_index = subject >> AWS_LOG_SUBJECT_STRIDE_BITS;
+ uint32_t subject_index = subject & AWS_LOG_SUBJECT_SPACE_MASK;
+
+ const struct aws_log_subject_info_list *subject_slot = s_log_subject_slots[slot_index];
+
+ if (!subject_slot || subject_index >= subject_slot->count) {
+ return NULL;
+ }
+
+ return &subject_slot->subject_list[subject_index];
+}
+
+const char *aws_log_subject_name(aws_log_subject_t subject) {
+ const struct aws_log_subject_info *subject_info = s_get_log_subject_info_by_id(subject);
+
+ if (subject_info != NULL) {
+ return subject_info->subject_name;
+ }
+
+ return "Unknown";
+}
+
+void aws_register_log_subject_info_list(struct aws_log_subject_info_list *log_subject_list) {
+ /*
+ * We're not so worried about these asserts being removed in an NDEBUG build
+ * - we'll either segfault immediately (for the first two) or for the count
+ * assert, the registration will be ineffective.
+ */
+ AWS_FATAL_ASSERT(log_subject_list);
+ AWS_FATAL_ASSERT(log_subject_list->subject_list);
+ AWS_FATAL_ASSERT(log_subject_list->count);
+
+ const uint32_t min_range = log_subject_list->subject_list[0].subject_id;
+ const uint32_t slot_index = min_range >> AWS_LOG_SUBJECT_STRIDE_BITS;
+
+ if (slot_index >= AWS_PACKAGE_SLOTS) {
+ /* This is an NDEBUG build apparently. Kill the process rather than
+ * corrupting heap. */
+ fprintf(stderr, "Bad log subject slot index 0x%016x\n", slot_index);
+ abort();
+ }
+
+ s_log_subject_slots[slot_index] = log_subject_list;
+}
+
+void aws_unregister_log_subject_info_list(struct aws_log_subject_info_list *log_subject_list) {
+ /*
+ * We're not so worried about these asserts being removed in an NDEBUG build
+ * - we'll either segfault immediately (for the first two) or for the count
+ * assert, the registration will be ineffective.
+ */
+ AWS_FATAL_ASSERT(log_subject_list);
+ AWS_FATAL_ASSERT(log_subject_list->subject_list);
+ AWS_FATAL_ASSERT(log_subject_list->count);
+
+ const uint32_t min_range = log_subject_list->subject_list[0].subject_id;
+ const uint32_t slot_index = min_range >> AWS_LOG_SUBJECT_STRIDE_BITS;
+
+ if (slot_index >= AWS_PACKAGE_SLOTS) {
+ /* This is an NDEBUG build apparently. Kill the process rather than
+ * corrupting heap. */
+ fprintf(stderr, "Bad log subject slot index 0x%016x\n", slot_index);
+ AWS_FATAL_ASSERT(false);
+ }
+
+ s_log_subject_slots[slot_index] = NULL;
+}
+
+/*
+ * no alloc implementation
+ */
+struct aws_logger_noalloc {
+ enum aws_log_level level;
+ FILE *file;
+ bool should_close;
+ struct aws_mutex lock;
+};
+
+static enum aws_log_level s_noalloc_stderr_logger_get_log_level(struct aws_logger *logger, aws_log_subject_t subject) {
+ (void)subject;
+
+ struct aws_logger_noalloc *impl = logger->p_impl;
+ return impl->level;
+}
+
+#define MAXIMUM_NO_ALLOC_LOG_LINE_SIZE 8192
+
+static int s_noalloc_stderr_logger_log(
+ struct aws_logger *logger,
+ enum aws_log_level log_level,
+ aws_log_subject_t subject,
+ const char *format,
+ ...) {
+
+ char format_buffer[MAXIMUM_NO_ALLOC_LOG_LINE_SIZE];
+
+ va_list format_args;
+ va_start(format_args, format);
+
+#if _MSC_VER
+# pragma warning(push)
+# pragma warning(disable : 4221) /* allow struct member to reference format_buffer */
+#endif
+
+ struct aws_logging_standard_formatting_data format_data = {
+ .log_line_buffer = format_buffer,
+ .total_length = MAXIMUM_NO_ALLOC_LOG_LINE_SIZE,
+ .level = log_level,
+ .subject_name = aws_log_subject_name(subject),
+ .format = format,
+ .date_format = AWS_DATE_FORMAT_ISO_8601,
+ .allocator = logger->allocator,
+ .amount_written = 0,
+ };
+
+#if _MSC_VER
+# pragma warning(pop) /* disallow struct member to reference local value */
+#endif
+
+ int result = aws_format_standard_log_line(&format_data, format_args);
+
+ va_end(format_args);
+
+ if (result == AWS_OP_ERR) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_logger_noalloc *impl = logger->p_impl;
+
+ aws_mutex_lock(&impl->lock);
+
+ if (fwrite(format_buffer, 1, format_data.amount_written, impl->file) < format_data.amount_written) {
+ return aws_translate_and_raise_io_error(errno);
+ }
+
+ aws_mutex_unlock(&impl->lock);
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_noalloc_stderr_logger_clean_up(struct aws_logger *logger) {
+ if (logger == NULL) {
+ return;
+ }
+
+ struct aws_logger_noalloc *impl = logger->p_impl;
+ if (impl->should_close) {
+ fclose(impl->file);
+ }
+
+ aws_mutex_clean_up(&impl->lock);
+
+ aws_mem_release(logger->allocator, impl);
+ AWS_ZERO_STRUCT(*logger);
+}
+
+static struct aws_logger_vtable s_noalloc_stderr_vtable = {
+ .get_log_level = s_noalloc_stderr_logger_get_log_level,
+ .log = s_noalloc_stderr_logger_log,
+ .clean_up = s_noalloc_stderr_logger_clean_up,
+};
+
+int aws_logger_init_noalloc(
+ struct aws_logger *logger,
+ struct aws_allocator *allocator,
+ struct aws_logger_standard_options *options) {
+
+ struct aws_logger_noalloc *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_logger_noalloc));
+
+ if (impl == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ impl->level = options->level;
+ if (options->file != NULL) {
+ impl->file = options->file;
+ impl->should_close = false;
+    } else { /* no open handle supplied; open the named file ourselves */
+ impl->file = fopen(options->filename, "w");
+ impl->should_close = true;
+ }
+
+ aws_mutex_init(&impl->lock);
+
+ logger->vtable = &s_noalloc_stderr_vtable;
+ logger->allocator = allocator;
+ logger->p_impl = impl;
+
+ return AWS_OP_SUCCESS;
+}
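
A sketch of standing up the standard pipeline logger defined above and routing one message through it. The option struct, init/set/get/clean_up calls, and the vtable log/get_log_level signatures are taken from this file; the AWS_LL_* constants, the subject id 0, and aws_default_allocator() are assumed, and production code would normally log through the AWS_LOGF_* macros rather than the vtable.

#include <aws/common/logging.h>

#include <stdio.h>

int main(void) {
    struct aws_allocator *allocator = aws_default_allocator();

    struct aws_logger_standard_options options = {
        .level = AWS_LL_DEBUG,
        .file = stderr, /* alternatively set .filename and let the writer open the file */
    };

    struct aws_logger logger;
    if (aws_logger_init_standard(&logger, allocator, &options)) {
        return 1;
    }

    aws_logger_set(&logger); /* install as the process-wide root logger */

    struct aws_logger *root = aws_logger_get();
    if (root->vtable->get_log_level(root, 0 /* subject */) >= AWS_LL_INFO) {
        root->vtable->log(root, AWS_LL_INFO, 0 /* subject */, "logger initialized, level=%d", (int)options.level);
    }

    aws_logger_set(NULL);         /* fall back to the null logger before tear-down */
    aws_logger_clean_up(&logger); /* releases the channel, formatter, and writer it owns */
    return 0;
}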
diff --git a/contrib/restricted/aws/aws-c-common/source/lru_cache.c b/contrib/restricted/aws/aws-c-common/source/lru_cache.c
index dcd1f2267c..15de626b96 100644
--- a/contrib/restricted/aws/aws-c-common/source/lru_cache.c
+++ b/contrib/restricted/aws/aws-c-common/source/lru_cache.c
@@ -1,28 +1,28 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/lru_cache.h>
-static int s_lru_cache_put(struct aws_cache *cache, const void *key, void *p_value);
-static int s_lru_cache_find(struct aws_cache *cache, const void *key, void **p_value);
-static void *s_lru_cache_use_lru_element(struct aws_cache *cache);
-static void *s_lru_cache_get_mru_element(const struct aws_cache *cache);
+static int s_lru_cache_put(struct aws_cache *cache, const void *key, void *p_value);
+static int s_lru_cache_find(struct aws_cache *cache, const void *key, void **p_value);
+static void *s_lru_cache_use_lru_element(struct aws_cache *cache);
+static void *s_lru_cache_get_mru_element(const struct aws_cache *cache);
-struct lru_cache_impl_vtable {
- void *(*use_lru_element)(struct aws_cache *cache);
- void *(*get_mru_element)(const struct aws_cache *cache);
+struct lru_cache_impl_vtable {
+ void *(*use_lru_element)(struct aws_cache *cache);
+ void *(*get_mru_element)(const struct aws_cache *cache);
};
-static struct aws_cache_vtable s_lru_cache_vtable = {
- .destroy = aws_cache_base_default_destroy,
- .find = s_lru_cache_find,
- .put = s_lru_cache_put,
- .remove = aws_cache_base_default_remove,
- .clear = aws_cache_base_default_clear,
- .get_element_count = aws_cache_base_default_get_element_count,
-};
+static struct aws_cache_vtable s_lru_cache_vtable = {
+ .destroy = aws_cache_base_default_destroy,
+ .find = s_lru_cache_find,
+ .put = s_lru_cache_put,
+ .remove = aws_cache_base_default_remove,
+ .clear = aws_cache_base_default_clear,
+ .get_element_count = aws_cache_base_default_get_element_count,
+};
-struct aws_cache *aws_cache_new_lru(
+struct aws_cache *aws_cache_new_lru(
struct aws_allocator *allocator,
aws_hash_fn *hash_fn,
aws_hash_callback_eq_fn *equals_fn,
@@ -31,81 +31,81 @@ struct aws_cache *aws_cache_new_lru(
size_t max_items) {
AWS_ASSERT(allocator);
AWS_ASSERT(max_items);
- struct aws_cache *lru_cache = NULL;
- struct lru_cache_impl_vtable *impl = NULL;
+ struct aws_cache *lru_cache = NULL;
+ struct lru_cache_impl_vtable *impl = NULL;
- if (!aws_mem_acquire_many(
- allocator, 2, &lru_cache, sizeof(struct aws_cache), &impl, sizeof(struct lru_cache_impl_vtable))) {
- return NULL;
+ if (!aws_mem_acquire_many(
+ allocator, 2, &lru_cache, sizeof(struct aws_cache), &impl, sizeof(struct lru_cache_impl_vtable))) {
+ return NULL;
}
- impl->use_lru_element = s_lru_cache_use_lru_element;
- impl->get_mru_element = s_lru_cache_get_mru_element;
- lru_cache->allocator = allocator;
- lru_cache->max_items = max_items;
- lru_cache->vtable = &s_lru_cache_vtable;
- lru_cache->impl = impl;
- if (aws_linked_hash_table_init(
- &lru_cache->table, allocator, hash_fn, equals_fn, destroy_key_fn, destroy_value_fn, max_items)) {
- return NULL;
- }
- return lru_cache;
+ impl->use_lru_element = s_lru_cache_use_lru_element;
+ impl->get_mru_element = s_lru_cache_get_mru_element;
+ lru_cache->allocator = allocator;
+ lru_cache->max_items = max_items;
+ lru_cache->vtable = &s_lru_cache_vtable;
+ lru_cache->impl = impl;
+ if (aws_linked_hash_table_init(
+ &lru_cache->table, allocator, hash_fn, equals_fn, destroy_key_fn, destroy_value_fn, max_items)) {
+ return NULL;
+ }
+ return lru_cache;
}
-/* implementation for lru cache put */
-static int s_lru_cache_put(struct aws_cache *cache, const void *key, void *p_value) {
+/* implementation for lru cache put */
+static int s_lru_cache_put(struct aws_cache *cache, const void *key, void *p_value) {
- if (aws_linked_hash_table_put(&cache->table, key, p_value)) {
+ if (aws_linked_hash_table_put(&cache->table, key, p_value)) {
return AWS_OP_ERR;
}
- /* Manage the space if we actually added a new element and the cache is full. */
- if (aws_linked_hash_table_get_element_count(&cache->table) > cache->max_items) {
- /* we're over the cache size limit. Remove whatever is in the front of
- * the linked_hash_table, which is the LRU element */
- const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table);
- struct aws_linked_list_node *node = aws_linked_list_front(list);
- struct aws_linked_hash_table_node *table_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node);
- return aws_linked_hash_table_remove(&cache->table, table_node->key);
+ /* Manage the space if we actually added a new element and the cache is full. */
+ if (aws_linked_hash_table_get_element_count(&cache->table) > cache->max_items) {
+ /* we're over the cache size limit. Remove whatever is in the front of
+ * the linked_hash_table, which is the LRU element */
+ const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table);
+ struct aws_linked_list_node *node = aws_linked_list_front(list);
+ struct aws_linked_hash_table_node *table_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node);
+ return aws_linked_hash_table_remove(&cache->table, table_node->key);
}
return AWS_OP_SUCCESS;
}
-/* implementation for lru cache find */
-static int s_lru_cache_find(struct aws_cache *cache, const void *key, void **p_value) {
- return (aws_linked_hash_table_find_and_move_to_back(&cache->table, key, p_value));
+/* implementation for lru cache find */
+static int s_lru_cache_find(struct aws_cache *cache, const void *key, void **p_value) {
+ return (aws_linked_hash_table_find_and_move_to_back(&cache->table, key, p_value));
}
-static void *s_lru_cache_use_lru_element(struct aws_cache *cache) {
- const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table);
- if (aws_linked_list_empty(list)) {
+static void *s_lru_cache_use_lru_element(struct aws_cache *cache) {
+ const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table);
+ if (aws_linked_list_empty(list)) {
return NULL;
}
- struct aws_linked_list_node *node = aws_linked_list_front(list);
- struct aws_linked_hash_table_node *lru_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node);
+ struct aws_linked_list_node *node = aws_linked_list_front(list);
+ struct aws_linked_hash_table_node *lru_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node);
- aws_linked_hash_table_move_node_to_end_of_list(&cache->table, lru_node);
- return lru_node->value;
+ aws_linked_hash_table_move_node_to_end_of_list(&cache->table, lru_node);
+ return lru_node->value;
}
-static void *s_lru_cache_get_mru_element(const struct aws_cache *cache) {
- const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table);
- if (aws_linked_list_empty(list)) {
+static void *s_lru_cache_get_mru_element(const struct aws_cache *cache) {
+ const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table);
+ if (aws_linked_list_empty(list)) {
return NULL;
}
- struct aws_linked_list_node *node = aws_linked_list_back(list);
- struct aws_linked_hash_table_node *mru_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node);
- return mru_node->value;
-}
+ struct aws_linked_list_node *node = aws_linked_list_back(list);
+ struct aws_linked_hash_table_node *mru_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node);
+ return mru_node->value;
+}
-void *aws_lru_cache_use_lru_element(struct aws_cache *cache) {
- AWS_PRECONDITION(cache);
- AWS_PRECONDITION(cache->impl);
- struct lru_cache_impl_vtable *impl_vtable = cache->impl;
- return impl_vtable->use_lru_element(cache);
+void *aws_lru_cache_use_lru_element(struct aws_cache *cache) {
+ AWS_PRECONDITION(cache);
+ AWS_PRECONDITION(cache->impl);
+ struct lru_cache_impl_vtable *impl_vtable = cache->impl;
+ return impl_vtable->use_lru_element(cache);
}
-void *aws_lru_cache_get_mru_element(const struct aws_cache *cache) {
- AWS_PRECONDITION(cache);
- AWS_PRECONDITION(cache->impl);
- struct lru_cache_impl_vtable *impl_vtable = cache->impl;
- return impl_vtable->get_mru_element(cache);
+void *aws_lru_cache_get_mru_element(const struct aws_cache *cache) {
+ AWS_PRECONDITION(cache);
+ AWS_PRECONDITION(cache->impl);
+ struct lru_cache_impl_vtable *impl_vtable = cache->impl;
+ return impl_vtable->get_mru_element(cache);
}
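
A minimal usage sketch for the two LRU accessors above. It assumes a populated LRU cache created elsewhere through the generic aws_cache API; only the calls defined in this file are exercised.

    #include <aws/common/lru_cache.h>
    #include <stdio.h>

    /* Sketch: `cache` is an LRU cache created and filled elsewhere. */
    static void inspect_lru_cache(struct aws_cache *cache) {
        /* Peek at the most-recently-used value; does not reorder the cache. */
        void *mru = aws_lru_cache_get_mru_element(cache);

        /* Fetch the least-recently-used value and promote it to MRU position. */
        void *lru = aws_lru_cache_use_lru_element(cache);

        printf("mru=%p, promoted lru=%p\n", mru, lru);
    }
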
diff --git a/contrib/restricted/aws/aws-c-common/source/math.c b/contrib/restricted/aws/aws-c-common/source/math.c
index 0659d3b064..40d833c1a9 100644
--- a/contrib/restricted/aws/aws-c-common/source/math.c
+++ b/contrib/restricted/aws/aws-c-common/source/math.c
@@ -1,24 +1,24 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/math.h>
-#include <stdarg.h>
-
-AWS_COMMON_API int aws_add_size_checked_varargs(size_t num, size_t *r, ...) {
- va_list argp;
- va_start(argp, r);
-
- size_t accum = 0;
- for (size_t i = 0; i < num; ++i) {
- size_t next = va_arg(argp, size_t);
- if (aws_add_size_checked(accum, next, &accum) == AWS_OP_ERR) {
- va_end(argp);
- return AWS_OP_ERR;
- }
- }
- *r = accum;
- va_end(argp);
- return AWS_OP_SUCCESS;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/math.h>
+#include <stdarg.h>
+
+AWS_COMMON_API int aws_add_size_checked_varargs(size_t num, size_t *r, ...) {
+ va_list argp;
+ va_start(argp, r);
+
+ size_t accum = 0;
+ for (size_t i = 0; i < num; ++i) {
+ size_t next = va_arg(argp, size_t);
+ if (aws_add_size_checked(accum, next, &accum) == AWS_OP_ERR) {
+ va_end(argp);
+ return AWS_OP_ERR;
+ }
+ }
+ *r = accum;
+ va_end(argp);
+ return AWS_OP_SUCCESS;
+}
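
A short sketch of calling the checked varargs adder above: the count comes first, each following argument must be passed as a size_t, and the function returns AWS_OP_ERR instead of silently wrapping on overflow.

    #include <aws/common/math.h>
    #include <stdio.h>

    int main(void) {
        size_t total = 0;

        /* Sum three size_t values with overflow checking. */
        if (aws_add_size_checked_varargs(3, &total, (size_t)10, (size_t)20, (size_t)30) == AWS_OP_ERR) {
            return 1; /* an overflow would land here */
        }

        printf("total=%zu\n", total); /* prints total=60 */
        return 0;
    }
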
diff --git a/contrib/restricted/aws/aws-c-common/source/memtrace.c b/contrib/restricted/aws/aws-c-common/source/memtrace.c
index dcfd836d07..9b776211f9 100644
--- a/contrib/restricted/aws/aws-c-common/source/memtrace.c
+++ b/contrib/restricted/aws/aws-c-common/source/memtrace.c
@@ -1,527 +1,527 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/atomics.h>
-#include <aws/common/byte_buf.h>
-#include <aws/common/hash_table.h>
-#include <aws/common/logging.h>
-#include <aws/common/mutex.h>
-#include <aws/common/priority_queue.h>
-#include <aws/common/string.h>
-#include <aws/common/system_info.h>
-#include <aws/common/time.h>
-
-/* describes a single live allocation.
- * allocated by aws_default_allocator() */
-struct alloc_info {
- size_t size;
- time_t time;
- uint64_t stack; /* hash of stack frame pointers */
-};
-
-/* Using a flexible array member is the C99 compliant way to have the frames immediately follow the header.
- *
- * MSVC doesn't know this for some reason so we need to use a pragma to make
- * it happy.
- */
-#ifdef _MSC_VER
-# pragma warning(push)
-# pragma warning(disable : 4200) /* nonstandard extension used: zero-sized array in struct/union */
-#endif
-
-/* one of these is stored per unique stack
- * allocated by aws_default_allocator() */
-struct stack_trace {
- size_t depth; /* length of frames[] */
- void *const frames[]; /* rest of frames are allocated after */
-};
-
-#ifdef _MSC_VER
-# pragma warning(pop)
-#endif
-
-/* Tracking structure, used as the allocator impl.
- * This structure, and all its bookkeeping data structures, are created with the aws_default_allocator().
- * This is not customizable because it's too expensive for every little allocation to store
- * a pointer back to its original allocator. */
-struct alloc_tracer {
- struct aws_allocator *traced_allocator; /* underlying allocator */
- enum aws_mem_trace_level level; /* level to trace at */
- size_t frames_per_stack; /* how many frames to keep per stack */
- struct aws_atomic_var allocated; /* bytes currently allocated */
- struct aws_mutex mutex; /* protects everything below */
- struct aws_hash_table allocs; /* live allocations, maps address -> alloc_info */
- struct aws_hash_table stacks; /* unique stack traces, maps hash -> stack_trace */
-};
-
-/* number of frames to skip in call stacks (s_alloc_tracer_track, and the vtable function) */
-#define FRAMES_TO_SKIP 2
-
-static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size);
-static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr);
-static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size);
-static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size);
-
-static struct aws_allocator s_trace_allocator = {
- .mem_acquire = s_trace_mem_acquire,
- .mem_release = s_trace_mem_release,
- .mem_realloc = s_trace_mem_realloc,
- .mem_calloc = s_trace_mem_calloc,
-};
-
-/* for the hash table, to destroy elements */
-static void s_destroy_alloc(void *data) {
- struct alloc_info *alloc = data;
- aws_mem_release(aws_default_allocator(), alloc);
-}
-
-static void s_destroy_stacktrace(void *data) {
- struct stack_trace *stack = data;
- aws_mem_release(aws_default_allocator(), stack);
-}
-
-static void s_alloc_tracer_init(
- struct alloc_tracer *tracer,
- struct aws_allocator *traced_allocator,
- enum aws_mem_trace_level level,
- size_t frames_per_stack) {
-
- void *stack[1];
- if (!aws_backtrace(stack, 1)) {
- /* clamp level if tracing isn't available */
- level = level > AWS_MEMTRACE_BYTES ? AWS_MEMTRACE_BYTES : level;
- }
-
- tracer->traced_allocator = traced_allocator;
- tracer->level = level;
-
- if (tracer->level >= AWS_MEMTRACE_BYTES) {
- aws_atomic_init_int(&tracer->allocated, 0);
- AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_mutex_init(&tracer->mutex));
- AWS_FATAL_ASSERT(
- AWS_OP_SUCCESS ==
- aws_hash_table_init(
- &tracer->allocs, aws_default_allocator(), 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_alloc));
- }
-
- if (tracer->level == AWS_MEMTRACE_STACKS) {
- if (frames_per_stack > 128) {
- frames_per_stack = 128;
- }
- tracer->frames_per_stack = (frames_per_stack) ? frames_per_stack : 8;
- AWS_FATAL_ASSERT(
- AWS_OP_SUCCESS ==
- aws_hash_table_init(
- &tracer->stacks, aws_default_allocator(), 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_stacktrace));
- }
-}
-
-static void s_alloc_tracer_track(struct alloc_tracer *tracer, void *ptr, size_t size) {
- if (tracer->level == AWS_MEMTRACE_NONE) {
- return;
- }
-
- aws_atomic_fetch_add(&tracer->allocated, size);
-
- struct alloc_info *alloc = aws_mem_calloc(aws_default_allocator(), 1, sizeof(struct alloc_info));
- AWS_FATAL_ASSERT(alloc);
- alloc->size = size;
- alloc->time = time(NULL);
-
- if (tracer->level == AWS_MEMTRACE_STACKS) {
- /* capture stack frames, skip 2 for this function and the allocation vtable function */
- AWS_VARIABLE_LENGTH_ARRAY(void *, stack_frames, (FRAMES_TO_SKIP + tracer->frames_per_stack));
- size_t stack_depth = aws_backtrace(stack_frames, FRAMES_TO_SKIP + tracer->frames_per_stack);
- if (stack_depth) {
- /* hash the stack pointers */
- struct aws_byte_cursor stack_cursor =
- aws_byte_cursor_from_array(stack_frames, stack_depth * sizeof(void *));
- uint64_t stack_id = aws_hash_byte_cursor_ptr(&stack_cursor);
- alloc->stack = stack_id; /* associate the stack with the alloc */
-
- aws_mutex_lock(&tracer->mutex);
- struct aws_hash_element *item = NULL;
- int was_created = 0;
- AWS_FATAL_ASSERT(
- AWS_OP_SUCCESS ==
- aws_hash_table_create(&tracer->stacks, (void *)(uintptr_t)stack_id, &item, &was_created));
- /* If this is a new stack, save it to the hash */
- if (was_created) {
- struct stack_trace *stack = aws_mem_calloc(
- aws_default_allocator(),
- 1,
- sizeof(struct stack_trace) + (sizeof(void *) * tracer->frames_per_stack));
- AWS_FATAL_ASSERT(stack);
- memcpy(
- (void **)&stack->frames[0],
- &stack_frames[FRAMES_TO_SKIP],
- (stack_depth - FRAMES_TO_SKIP) * sizeof(void *));
- stack->depth = stack_depth - FRAMES_TO_SKIP;
- item->value = stack;
- }
- aws_mutex_unlock(&tracer->mutex);
- }
- }
-
- aws_mutex_lock(&tracer->mutex);
- AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_put(&tracer->allocs, ptr, alloc, NULL));
- aws_mutex_unlock(&tracer->mutex);
-}
-
-static void s_alloc_tracer_untrack(struct alloc_tracer *tracer, void *ptr) {
- if (tracer->level == AWS_MEMTRACE_NONE) {
- return;
- }
-
- aws_mutex_lock(&tracer->mutex);
- struct aws_hash_element *item;
- AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(&tracer->allocs, ptr, &item));
- /* because the tracer can be installed at any time, it is possible for an allocation to not
- * be tracked. Therefore, we make sure the find succeeds, but then check the returned
- * value */
- if (item) {
- AWS_FATAL_ASSERT(item->key == ptr && item->value);
- struct alloc_info *alloc = item->value;
- aws_atomic_fetch_sub(&tracer->allocated, alloc->size);
- s_destroy_alloc(item->value);
- AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_remove_element(&tracer->allocs, item));
- }
- aws_mutex_unlock(&tracer->mutex);
-}
-
-/* used only to resolve stacks -> trace, count, size at dump time */
-struct stack_metadata {
- struct aws_string *trace;
- size_t count;
- size_t size;
-};
-
-static int s_collect_stack_trace(void *context, struct aws_hash_element *item) {
- struct alloc_tracer *tracer = context;
- struct aws_hash_table *all_stacks = &tracer->stacks;
- struct stack_metadata *stack_info = item->value;
- struct aws_hash_element *stack_item = NULL;
- AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(all_stacks, item->key, &stack_item));
- AWS_FATAL_ASSERT(stack_item);
- struct stack_trace *stack = stack_item->value;
- void *const *stack_frames = &stack->frames[0];
-
- /* convert the frame pointers to symbols, and concat into a buffer */
- char buf[4096] = {0};
- struct aws_byte_buf stacktrace = aws_byte_buf_from_empty_array(buf, AWS_ARRAY_SIZE(buf));
- struct aws_byte_cursor newline = aws_byte_cursor_from_c_str("\n");
- char **symbols = aws_backtrace_symbols(stack_frames, stack->depth);
- for (size_t idx = 0; idx < stack->depth; ++idx) {
- if (idx > 0) {
- aws_byte_buf_append(&stacktrace, &newline);
- }
- const char *caller = symbols[idx];
- if (!caller || !caller[0]) {
- break;
- }
- struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str(caller);
- aws_byte_buf_append(&stacktrace, &cursor);
- }
- free(symbols);
- /* record the resultant buffer as a string */
- stack_info->trace = aws_string_new_from_array(aws_default_allocator(), stacktrace.buffer, stacktrace.len);
- AWS_FATAL_ASSERT(stack_info->trace);
- aws_byte_buf_clean_up(&stacktrace);
- return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
-}
-
-static int s_stack_info_compare_size(const void *a, const void *b) {
- const struct stack_metadata *stack_a = *(const struct stack_metadata **)a;
- const struct stack_metadata *stack_b = *(const struct stack_metadata **)b;
- return stack_b->size > stack_a->size;
-}
-
-static int s_stack_info_compare_count(const void *a, const void *b) {
- const struct stack_metadata *stack_a = *(const struct stack_metadata **)a;
- const struct stack_metadata *stack_b = *(const struct stack_metadata **)b;
- return stack_b->count > stack_a->count;
-}
-
-static void s_stack_info_destroy(void *data) {
- struct stack_metadata *stack = data;
- struct aws_allocator *allocator = stack->trace->allocator;
- aws_string_destroy(stack->trace);
- aws_mem_release(allocator, stack);
-}
-
-/* tally up count/size per stack from all allocs */
-static int s_collect_stack_stats(void *context, struct aws_hash_element *item) {
- struct aws_hash_table *stack_info = context;
- struct alloc_info *alloc = item->value;
- struct aws_hash_element *stack_item = NULL;
- int was_created = 0;
- AWS_FATAL_ASSERT(
- AWS_OP_SUCCESS ==
- aws_hash_table_create(stack_info, (void *)(uintptr_t)alloc->stack, &stack_item, &was_created));
- if (was_created) {
- stack_item->value = aws_mem_calloc(aws_default_allocator(), 1, sizeof(struct stack_metadata));
- AWS_FATAL_ASSERT(stack_item->value);
- }
- struct stack_metadata *stack = stack_item->value;
- stack->count++;
- stack->size += alloc->size;
- return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
-}
-
-static int s_insert_stacks(void *context, struct aws_hash_element *item) {
- struct aws_priority_queue *pq = context;
- struct stack_metadata *stack = item->value;
- AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(pq, &stack));
- return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
-}
-
-static int s_insert_allocs(void *context, struct aws_hash_element *item) {
- struct aws_priority_queue *allocs = context;
- struct alloc_info *alloc = item->value;
- AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(allocs, &alloc));
- return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
-}
-
-static int s_alloc_compare(const void *a, const void *b) {
- const struct alloc_info *alloc_a = *(const struct alloc_info **)a;
- const struct alloc_info *alloc_b = *(const struct alloc_info **)b;
- return alloc_a->time > alloc_b->time;
-}
-
-void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) {
- struct alloc_tracer *tracer = trace_allocator->impl;
- if (tracer->level == AWS_MEMTRACE_NONE || aws_atomic_load_int(&tracer->allocated) == 0) {
- return;
- }
-
- aws_mutex_lock(&tracer->mutex);
-
- size_t num_allocs = aws_hash_table_get_entry_count(&tracer->allocs);
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "# BEGIN MEMTRACE DUMP #\n");
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE,
- "tracer: %zu bytes still allocated in %zu allocations\n",
- aws_atomic_load_int(&tracer->allocated),
- num_allocs);
-
- /* convert stacks from pointers -> symbols */
- struct aws_hash_table stack_info;
- AWS_ZERO_STRUCT(stack_info);
- if (tracer->level == AWS_MEMTRACE_STACKS) {
- AWS_FATAL_ASSERT(
- AWS_OP_SUCCESS ==
- aws_hash_table_init(
- &stack_info, aws_default_allocator(), 64, aws_hash_ptr, aws_ptr_eq, NULL, s_stack_info_destroy));
- /* collect active stacks, tally up sizes and counts */
- aws_hash_table_foreach(&tracer->allocs, s_collect_stack_stats, &stack_info);
- /* collect stack traces for active stacks */
- aws_hash_table_foreach(&stack_info, s_collect_stack_trace, tracer);
- }
-
- /* sort allocs by time */
- struct aws_priority_queue allocs;
- AWS_FATAL_ASSERT(
- AWS_OP_SUCCESS ==
- aws_priority_queue_init_dynamic(
- &allocs, aws_default_allocator(), num_allocs, sizeof(struct alloc_info *), s_alloc_compare));
- aws_hash_table_foreach(&tracer->allocs, s_insert_allocs, &allocs);
- /* dump allocs by time */
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Leaks in order of allocation:\n");
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
- while (aws_priority_queue_size(&allocs)) {
- struct alloc_info *alloc = NULL;
- aws_priority_queue_pop(&allocs, &alloc);
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "ALLOC %zu bytes\n", alloc->size);
- if (alloc->stack) {
- struct aws_hash_element *item = NULL;
- AWS_FATAL_ASSERT(
- AWS_OP_SUCCESS == aws_hash_table_find(&stack_info, (void *)(uintptr_t)alloc->stack, &item));
- struct stack_metadata *stack = item->value;
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, " stacktrace:\n%s\n", (const char *)aws_string_bytes(stack->trace));
- }
- }
-
- aws_priority_queue_clean_up(&allocs);
-
- if (tracer->level == AWS_MEMTRACE_STACKS) {
- size_t num_stacks = aws_hash_table_get_entry_count(&stack_info);
- /* sort stacks by total size leaked */
- struct aws_priority_queue stacks_by_size;
- AWS_FATAL_ASSERT(
- AWS_OP_SUCCESS == aws_priority_queue_init_dynamic(
- &stacks_by_size,
- aws_default_allocator(),
- num_stacks,
- sizeof(struct stack_metadata *),
- s_stack_info_compare_size));
- aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_size);
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE,
- "################################################################################\n");
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by bytes leaked:\n");
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE,
- "################################################################################\n");
- while (aws_priority_queue_size(&stacks_by_size) > 0) {
- struct stack_metadata *stack = NULL;
- aws_priority_queue_pop(&stacks_by_size, &stack);
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu bytes in %zu allocations:\n", stack->size, stack->count);
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace));
- }
- aws_priority_queue_clean_up(&stacks_by_size);
-
- /* sort stacks by number of leaks */
- struct aws_priority_queue stacks_by_count;
- AWS_FATAL_ASSERT(
- AWS_OP_SUCCESS == aws_priority_queue_init_dynamic(
- &stacks_by_count,
- aws_default_allocator(),
- num_stacks,
- sizeof(struct stack_metadata *),
- s_stack_info_compare_count));
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE,
- "################################################################################\n");
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by number of leaks:\n");
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE,
- "################################################################################\n");
- aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_count);
- while (aws_priority_queue_size(&stacks_by_count) > 0) {
- struct stack_metadata *stack = NULL;
- aws_priority_queue_pop(&stacks_by_count, &stack);
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu allocations leaking %zu bytes:\n", stack->count, stack->size);
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace));
- }
- aws_priority_queue_clean_up(&stacks_by_count);
- aws_hash_table_clean_up(&stack_info);
- }
-
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "# END MEMTRACE DUMP #\n");
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
-
- aws_mutex_unlock(&tracer->mutex);
-}
-
-static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size) {
- struct alloc_tracer *tracer = allocator->impl;
- void *ptr = aws_mem_acquire(tracer->traced_allocator, size);
- if (ptr) {
- s_alloc_tracer_track(tracer, ptr, size);
- }
- return ptr;
-}
-
-static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr) {
- struct alloc_tracer *tracer = allocator->impl;
- s_alloc_tracer_untrack(tracer, ptr);
- aws_mem_release(tracer->traced_allocator, ptr);
-}
-
-static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size) {
- struct alloc_tracer *tracer = allocator->impl;
- void *new_ptr = old_ptr;
- if (aws_mem_realloc(tracer->traced_allocator, &new_ptr, old_size, new_size)) {
- return NULL;
- }
-
- s_alloc_tracer_untrack(tracer, old_ptr);
- s_alloc_tracer_track(tracer, new_ptr, new_size);
-
- return new_ptr;
-}
-
-static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
- struct alloc_tracer *tracer = allocator->impl;
- void *ptr = aws_mem_calloc(tracer->traced_allocator, num, size);
- if (ptr) {
- s_alloc_tracer_track(tracer, ptr, num * size);
- }
- return ptr;
-}
-
-struct aws_allocator *aws_mem_tracer_new(
- struct aws_allocator *allocator,
- struct aws_allocator *deprecated,
- enum aws_mem_trace_level level,
- size_t frames_per_stack) {
-
- /* deprecated customizable bookkeeping allocator */
- (void)deprecated;
-
- struct alloc_tracer *tracer = NULL;
- struct aws_allocator *trace_allocator = NULL;
- aws_mem_acquire_many(
- aws_default_allocator(),
- 2,
- &tracer,
- sizeof(struct alloc_tracer),
- &trace_allocator,
- sizeof(struct aws_allocator));
-
- AWS_FATAL_ASSERT(trace_allocator);
- AWS_FATAL_ASSERT(tracer);
-
- AWS_ZERO_STRUCT(*trace_allocator);
- AWS_ZERO_STRUCT(*tracer);
-
- /* copy the template vtable */
- *trace_allocator = s_trace_allocator;
- trace_allocator->impl = tracer;
-
- s_alloc_tracer_init(tracer, allocator, level, frames_per_stack);
- return trace_allocator;
-}
-
-struct aws_allocator *aws_mem_tracer_destroy(struct aws_allocator *trace_allocator) {
- struct alloc_tracer *tracer = trace_allocator->impl;
- struct aws_allocator *allocator = tracer->traced_allocator;
-
- if (tracer->level != AWS_MEMTRACE_NONE) {
- aws_mutex_lock(&tracer->mutex);
- aws_hash_table_clean_up(&tracer->allocs);
- aws_hash_table_clean_up(&tracer->stacks);
- aws_mutex_unlock(&tracer->mutex);
- aws_mutex_clean_up(&tracer->mutex);
- }
-
- aws_mem_release(aws_default_allocator(), tracer);
- /* trace_allocator is freed as part of the block tracer was allocated in */
-
- return allocator;
-}
-
-size_t aws_mem_tracer_bytes(struct aws_allocator *trace_allocator) {
- struct alloc_tracer *tracer = trace_allocator->impl;
- if (tracer->level == AWS_MEMTRACE_NONE) {
- return 0;
- }
-
- return aws_atomic_load_int(&tracer->allocated);
-}
-
-size_t aws_mem_tracer_count(struct aws_allocator *trace_allocator) {
- struct alloc_tracer *tracer = trace_allocator->impl;
- if (tracer->level == AWS_MEMTRACE_NONE) {
- return 0;
- }
-
- aws_mutex_lock(&tracer->mutex);
- size_t count = aws_hash_table_get_entry_count(&tracer->allocs);
- aws_mutex_unlock(&tracer->mutex);
- return count;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/atomics.h>
+#include <aws/common/byte_buf.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/logging.h>
+#include <aws/common/mutex.h>
+#include <aws/common/priority_queue.h>
+#include <aws/common/string.h>
+#include <aws/common/system_info.h>
+#include <aws/common/time.h>
+
+/* describes a single live allocation.
+ * allocated by aws_default_allocator() */
+struct alloc_info {
+ size_t size;
+ time_t time;
+ uint64_t stack; /* hash of stack frame pointers */
+};
+
+/* Using a flexible array member is the C99 compliant way to have the frames immediately follow the header.
+ *
+ * MSVC doesn't know this for some reason so we need to use a pragma to make
+ * it happy.
+ */
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable : 4200) /* nonstandard extension used: zero-sized array in struct/union */
+#endif
+
+/* one of these is stored per unique stack
+ * allocated by aws_default_allocator() */
+struct stack_trace {
+ size_t depth; /* length of frames[] */
+ void *const frames[]; /* rest of frames are allocated after */
+};
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
+
+/* Tracking structure, used as the allocator impl.
+ * This structure, and all its bookkeeping data structures, are created with the aws_default_allocator().
+ * This is not customizable because it's too expensive for every little allocation to store
+ * a pointer back to its original allocator. */
+struct alloc_tracer {
+ struct aws_allocator *traced_allocator; /* underlying allocator */
+ enum aws_mem_trace_level level; /* level to trace at */
+ size_t frames_per_stack; /* how many frames to keep per stack */
+ struct aws_atomic_var allocated; /* bytes currently allocated */
+ struct aws_mutex mutex; /* protects everything below */
+ struct aws_hash_table allocs; /* live allocations, maps address -> alloc_info */
+ struct aws_hash_table stacks; /* unique stack traces, maps hash -> stack_trace */
+};
+
+/* number of frames to skip in call stacks (s_alloc_tracer_track, and the vtable function) */
+#define FRAMES_TO_SKIP 2
+
+static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size);
+static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr);
+static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size);
+static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size);
+
+static struct aws_allocator s_trace_allocator = {
+ .mem_acquire = s_trace_mem_acquire,
+ .mem_release = s_trace_mem_release,
+ .mem_realloc = s_trace_mem_realloc,
+ .mem_calloc = s_trace_mem_calloc,
+};
+
+/* for the hash table, to destroy elements */
+static void s_destroy_alloc(void *data) {
+ struct alloc_info *alloc = data;
+ aws_mem_release(aws_default_allocator(), alloc);
+}
+
+static void s_destroy_stacktrace(void *data) {
+ struct stack_trace *stack = data;
+ aws_mem_release(aws_default_allocator(), stack);
+}
+
+static void s_alloc_tracer_init(
+ struct alloc_tracer *tracer,
+ struct aws_allocator *traced_allocator,
+ enum aws_mem_trace_level level,
+ size_t frames_per_stack) {
+
+ void *stack[1];
+ if (!aws_backtrace(stack, 1)) {
+ /* clamp level if tracing isn't available */
+ level = level > AWS_MEMTRACE_BYTES ? AWS_MEMTRACE_BYTES : level;
+ }
+
+ tracer->traced_allocator = traced_allocator;
+ tracer->level = level;
+
+ if (tracer->level >= AWS_MEMTRACE_BYTES) {
+ aws_atomic_init_int(&tracer->allocated, 0);
+ AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_mutex_init(&tracer->mutex));
+ AWS_FATAL_ASSERT(
+ AWS_OP_SUCCESS ==
+ aws_hash_table_init(
+ &tracer->allocs, aws_default_allocator(), 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_alloc));
+ }
+
+ if (tracer->level == AWS_MEMTRACE_STACKS) {
+ if (frames_per_stack > 128) {
+ frames_per_stack = 128;
+ }
+ tracer->frames_per_stack = (frames_per_stack) ? frames_per_stack : 8;
+ AWS_FATAL_ASSERT(
+ AWS_OP_SUCCESS ==
+ aws_hash_table_init(
+ &tracer->stacks, aws_default_allocator(), 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_stacktrace));
+ }
+}
+
+static void s_alloc_tracer_track(struct alloc_tracer *tracer, void *ptr, size_t size) {
+ if (tracer->level == AWS_MEMTRACE_NONE) {
+ return;
+ }
+
+ aws_atomic_fetch_add(&tracer->allocated, size);
+
+ struct alloc_info *alloc = aws_mem_calloc(aws_default_allocator(), 1, sizeof(struct alloc_info));
+ AWS_FATAL_ASSERT(alloc);
+ alloc->size = size;
+ alloc->time = time(NULL);
+
+ if (tracer->level == AWS_MEMTRACE_STACKS) {
+ /* capture stack frames, skip 2 for this function and the allocation vtable function */
+ AWS_VARIABLE_LENGTH_ARRAY(void *, stack_frames, (FRAMES_TO_SKIP + tracer->frames_per_stack));
+ size_t stack_depth = aws_backtrace(stack_frames, FRAMES_TO_SKIP + tracer->frames_per_stack);
+ if (stack_depth) {
+ /* hash the stack pointers */
+ struct aws_byte_cursor stack_cursor =
+ aws_byte_cursor_from_array(stack_frames, stack_depth * sizeof(void *));
+ uint64_t stack_id = aws_hash_byte_cursor_ptr(&stack_cursor);
+ alloc->stack = stack_id; /* associate the stack with the alloc */
+
+ aws_mutex_lock(&tracer->mutex);
+ struct aws_hash_element *item = NULL;
+ int was_created = 0;
+ AWS_FATAL_ASSERT(
+ AWS_OP_SUCCESS ==
+ aws_hash_table_create(&tracer->stacks, (void *)(uintptr_t)stack_id, &item, &was_created));
+ /* If this is a new stack, save it to the hash */
+ if (was_created) {
+ struct stack_trace *stack = aws_mem_calloc(
+ aws_default_allocator(),
+ 1,
+ sizeof(struct stack_trace) + (sizeof(void *) * tracer->frames_per_stack));
+ AWS_FATAL_ASSERT(stack);
+ memcpy(
+ (void **)&stack->frames[0],
+ &stack_frames[FRAMES_TO_SKIP],
+ (stack_depth - FRAMES_TO_SKIP) * sizeof(void *));
+ stack->depth = stack_depth - FRAMES_TO_SKIP;
+ item->value = stack;
+ }
+ aws_mutex_unlock(&tracer->mutex);
+ }
+ }
+
+ aws_mutex_lock(&tracer->mutex);
+ AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_put(&tracer->allocs, ptr, alloc, NULL));
+ aws_mutex_unlock(&tracer->mutex);
+}
+
+static void s_alloc_tracer_untrack(struct alloc_tracer *tracer, void *ptr) {
+ if (tracer->level == AWS_MEMTRACE_NONE) {
+ return;
+ }
+
+ aws_mutex_lock(&tracer->mutex);
+ struct aws_hash_element *item;
+ AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(&tracer->allocs, ptr, &item));
+ /* because the tracer can be installed at any time, it is possible for an allocation to not
+ * be tracked. Therefore, we make sure the find succeeds, but then check the returned
+ * value */
+ if (item) {
+ AWS_FATAL_ASSERT(item->key == ptr && item->value);
+ struct alloc_info *alloc = item->value;
+ aws_atomic_fetch_sub(&tracer->allocated, alloc->size);
+ s_destroy_alloc(item->value);
+ AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_remove_element(&tracer->allocs, item));
+ }
+ aws_mutex_unlock(&tracer->mutex);
+}
+
+/* used only to resolve stacks -> trace, count, size at dump time */
+struct stack_metadata {
+ struct aws_string *trace;
+ size_t count;
+ size_t size;
+};
+
+static int s_collect_stack_trace(void *context, struct aws_hash_element *item) {
+ struct alloc_tracer *tracer = context;
+ struct aws_hash_table *all_stacks = &tracer->stacks;
+ struct stack_metadata *stack_info = item->value;
+ struct aws_hash_element *stack_item = NULL;
+ AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(all_stacks, item->key, &stack_item));
+ AWS_FATAL_ASSERT(stack_item);
+ struct stack_trace *stack = stack_item->value;
+ void *const *stack_frames = &stack->frames[0];
+
+ /* convert the frame pointers to symbols, and concat into a buffer */
+ char buf[4096] = {0};
+ struct aws_byte_buf stacktrace = aws_byte_buf_from_empty_array(buf, AWS_ARRAY_SIZE(buf));
+ struct aws_byte_cursor newline = aws_byte_cursor_from_c_str("\n");
+ char **symbols = aws_backtrace_symbols(stack_frames, stack->depth);
+ for (size_t idx = 0; idx < stack->depth; ++idx) {
+ if (idx > 0) {
+ aws_byte_buf_append(&stacktrace, &newline);
+ }
+ const char *caller = symbols[idx];
+ if (!caller || !caller[0]) {
+ break;
+ }
+ struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str(caller);
+ aws_byte_buf_append(&stacktrace, &cursor);
+ }
+ free(symbols);
+ /* record the resultant buffer as a string */
+ stack_info->trace = aws_string_new_from_array(aws_default_allocator(), stacktrace.buffer, stacktrace.len);
+ AWS_FATAL_ASSERT(stack_info->trace);
+ aws_byte_buf_clean_up(&stacktrace);
+ return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
+}
+
+static int s_stack_info_compare_size(const void *a, const void *b) {
+ const struct stack_metadata *stack_a = *(const struct stack_metadata **)a;
+ const struct stack_metadata *stack_b = *(const struct stack_metadata **)b;
+ return stack_b->size > stack_a->size;
+}
+
+static int s_stack_info_compare_count(const void *a, const void *b) {
+ const struct stack_metadata *stack_a = *(const struct stack_metadata **)a;
+ const struct stack_metadata *stack_b = *(const struct stack_metadata **)b;
+ return stack_b->count > stack_a->count;
+}
+
+static void s_stack_info_destroy(void *data) {
+ struct stack_metadata *stack = data;
+ struct aws_allocator *allocator = stack->trace->allocator;
+ aws_string_destroy(stack->trace);
+ aws_mem_release(allocator, stack);
+}
+
+/* tally up count/size per stack from all allocs */
+static int s_collect_stack_stats(void *context, struct aws_hash_element *item) {
+ struct aws_hash_table *stack_info = context;
+ struct alloc_info *alloc = item->value;
+ struct aws_hash_element *stack_item = NULL;
+ int was_created = 0;
+ AWS_FATAL_ASSERT(
+ AWS_OP_SUCCESS ==
+ aws_hash_table_create(stack_info, (void *)(uintptr_t)alloc->stack, &stack_item, &was_created));
+ if (was_created) {
+ stack_item->value = aws_mem_calloc(aws_default_allocator(), 1, sizeof(struct stack_metadata));
+ AWS_FATAL_ASSERT(stack_item->value);
+ }
+ struct stack_metadata *stack = stack_item->value;
+ stack->count++;
+ stack->size += alloc->size;
+ return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
+}
+
+static int s_insert_stacks(void *context, struct aws_hash_element *item) {
+ struct aws_priority_queue *pq = context;
+ struct stack_metadata *stack = item->value;
+ AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(pq, &stack));
+ return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
+}
+
+static int s_insert_allocs(void *context, struct aws_hash_element *item) {
+ struct aws_priority_queue *allocs = context;
+ struct alloc_info *alloc = item->value;
+ AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(allocs, &alloc));
+ return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
+}
+
+static int s_alloc_compare(const void *a, const void *b) {
+ const struct alloc_info *alloc_a = *(const struct alloc_info **)a;
+ const struct alloc_info *alloc_b = *(const struct alloc_info **)b;
+ return alloc_a->time > alloc_b->time;
+}
+
+void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) {
+ struct alloc_tracer *tracer = trace_allocator->impl;
+ if (tracer->level == AWS_MEMTRACE_NONE || aws_atomic_load_int(&tracer->allocated) == 0) {
+ return;
+ }
+
+ aws_mutex_lock(&tracer->mutex);
+
+ size_t num_allocs = aws_hash_table_get_entry_count(&tracer->allocs);
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE, "# BEGIN MEMTRACE DUMP #\n");
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE,
+ "tracer: %zu bytes still allocated in %zu allocations\n",
+ aws_atomic_load_int(&tracer->allocated),
+ num_allocs);
+
+ /* convert stacks from pointers -> symbols */
+ struct aws_hash_table stack_info;
+ AWS_ZERO_STRUCT(stack_info);
+ if (tracer->level == AWS_MEMTRACE_STACKS) {
+ AWS_FATAL_ASSERT(
+ AWS_OP_SUCCESS ==
+ aws_hash_table_init(
+ &stack_info, aws_default_allocator(), 64, aws_hash_ptr, aws_ptr_eq, NULL, s_stack_info_destroy));
+ /* collect active stacks, tally up sizes and counts */
+ aws_hash_table_foreach(&tracer->allocs, s_collect_stack_stats, &stack_info);
+ /* collect stack traces for active stacks */
+ aws_hash_table_foreach(&stack_info, s_collect_stack_trace, tracer);
+ }
+
+ /* sort allocs by time */
+ struct aws_priority_queue allocs;
+ AWS_FATAL_ASSERT(
+ AWS_OP_SUCCESS ==
+ aws_priority_queue_init_dynamic(
+ &allocs, aws_default_allocator(), num_allocs, sizeof(struct alloc_info *), s_alloc_compare));
+ aws_hash_table_foreach(&tracer->allocs, s_insert_allocs, &allocs);
+ /* dump allocs by time */
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Leaks in order of allocation:\n");
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
+ while (aws_priority_queue_size(&allocs)) {
+ struct alloc_info *alloc = NULL;
+ aws_priority_queue_pop(&allocs, &alloc);
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "ALLOC %zu bytes\n", alloc->size);
+ if (alloc->stack) {
+ struct aws_hash_element *item = NULL;
+ AWS_FATAL_ASSERT(
+ AWS_OP_SUCCESS == aws_hash_table_find(&stack_info, (void *)(uintptr_t)alloc->stack, &item));
+ struct stack_metadata *stack = item->value;
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, " stacktrace:\n%s\n", (const char *)aws_string_bytes(stack->trace));
+ }
+ }
+
+ aws_priority_queue_clean_up(&allocs);
+
+ if (tracer->level == AWS_MEMTRACE_STACKS) {
+ size_t num_stacks = aws_hash_table_get_entry_count(&stack_info);
+ /* sort stacks by total size leaked */
+ struct aws_priority_queue stacks_by_size;
+ AWS_FATAL_ASSERT(
+ AWS_OP_SUCCESS == aws_priority_queue_init_dynamic(
+ &stacks_by_size,
+ aws_default_allocator(),
+ num_stacks,
+ sizeof(struct stack_metadata *),
+ s_stack_info_compare_size));
+ aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_size);
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE,
+ "################################################################################\n");
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by bytes leaked:\n");
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE,
+ "################################################################################\n");
+ while (aws_priority_queue_size(&stacks_by_size) > 0) {
+ struct stack_metadata *stack = NULL;
+ aws_priority_queue_pop(&stacks_by_size, &stack);
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu bytes in %zu allocations:\n", stack->size, stack->count);
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace));
+ }
+ aws_priority_queue_clean_up(&stacks_by_size);
+
+ /* sort stacks by number of leaks */
+ struct aws_priority_queue stacks_by_count;
+ AWS_FATAL_ASSERT(
+ AWS_OP_SUCCESS == aws_priority_queue_init_dynamic(
+ &stacks_by_count,
+ aws_default_allocator(),
+ num_stacks,
+ sizeof(struct stack_metadata *),
+ s_stack_info_compare_count));
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE,
+ "################################################################################\n");
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by number of leaks:\n");
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE,
+ "################################################################################\n");
+ aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_count);
+ while (aws_priority_queue_size(&stacks_by_count) > 0) {
+ struct stack_metadata *stack = NULL;
+ aws_priority_queue_pop(&stacks_by_count, &stack);
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu allocations leaking %zu bytes:\n", stack->count, stack->size);
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace));
+ }
+ aws_priority_queue_clean_up(&stacks_by_count);
+ aws_hash_table_clean_up(&stack_info);
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE, "# END MEMTRACE DUMP #\n");
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
+
+ aws_mutex_unlock(&tracer->mutex);
+}
+
+static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size) {
+ struct alloc_tracer *tracer = allocator->impl;
+ void *ptr = aws_mem_acquire(tracer->traced_allocator, size);
+ if (ptr) {
+ s_alloc_tracer_track(tracer, ptr, size);
+ }
+ return ptr;
+}
+
+static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr) {
+ struct alloc_tracer *tracer = allocator->impl;
+ s_alloc_tracer_untrack(tracer, ptr);
+ aws_mem_release(tracer->traced_allocator, ptr);
+}
+
+static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size) {
+ struct alloc_tracer *tracer = allocator->impl;
+ void *new_ptr = old_ptr;
+ if (aws_mem_realloc(tracer->traced_allocator, &new_ptr, old_size, new_size)) {
+ return NULL;
+ }
+
+ s_alloc_tracer_untrack(tracer, old_ptr);
+ s_alloc_tracer_track(tracer, new_ptr, new_size);
+
+ return new_ptr;
+}
+
+static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
+ struct alloc_tracer *tracer = allocator->impl;
+ void *ptr = aws_mem_calloc(tracer->traced_allocator, num, size);
+ if (ptr) {
+ s_alloc_tracer_track(tracer, ptr, num * size);
+ }
+ return ptr;
+}
+
+struct aws_allocator *aws_mem_tracer_new(
+ struct aws_allocator *allocator,
+ struct aws_allocator *deprecated,
+ enum aws_mem_trace_level level,
+ size_t frames_per_stack) {
+
+ /* deprecated customizable bookkeeping allocator */
+ (void)deprecated;
+
+ struct alloc_tracer *tracer = NULL;
+ struct aws_allocator *trace_allocator = NULL;
+ aws_mem_acquire_many(
+ aws_default_allocator(),
+ 2,
+ &tracer,
+ sizeof(struct alloc_tracer),
+ &trace_allocator,
+ sizeof(struct aws_allocator));
+
+ AWS_FATAL_ASSERT(trace_allocator);
+ AWS_FATAL_ASSERT(tracer);
+
+ AWS_ZERO_STRUCT(*trace_allocator);
+ AWS_ZERO_STRUCT(*tracer);
+
+ /* copy the template vtable */
+ *trace_allocator = s_trace_allocator;
+ trace_allocator->impl = tracer;
+
+ s_alloc_tracer_init(tracer, allocator, level, frames_per_stack);
+ return trace_allocator;
+}
+
+struct aws_allocator *aws_mem_tracer_destroy(struct aws_allocator *trace_allocator) {
+ struct alloc_tracer *tracer = trace_allocator->impl;
+ struct aws_allocator *allocator = tracer->traced_allocator;
+
+ if (tracer->level != AWS_MEMTRACE_NONE) {
+ aws_mutex_lock(&tracer->mutex);
+ aws_hash_table_clean_up(&tracer->allocs);
+ aws_hash_table_clean_up(&tracer->stacks);
+ aws_mutex_unlock(&tracer->mutex);
+ aws_mutex_clean_up(&tracer->mutex);
+ }
+
+ aws_mem_release(aws_default_allocator(), tracer);
+ /* trace_allocator is freed as part of the block tracer was allocated in */
+
+ return allocator;
+}
+
+size_t aws_mem_tracer_bytes(struct aws_allocator *trace_allocator) {
+ struct alloc_tracer *tracer = trace_allocator->impl;
+ if (tracer->level == AWS_MEMTRACE_NONE) {
+ return 0;
+ }
+
+ return aws_atomic_load_int(&tracer->allocated);
+}
+
+size_t aws_mem_tracer_count(struct aws_allocator *trace_allocator) {
+ struct alloc_tracer *tracer = trace_allocator->impl;
+ if (tracer->level == AWS_MEMTRACE_NONE) {
+ return 0;
+ }
+
+ aws_mutex_lock(&tracer->mutex);
+ size_t count = aws_hash_table_get_entry_count(&tracer->allocs);
+ aws_mutex_unlock(&tracer->mutex);
+ return count;
+}
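
A hedged end-to-end sketch of the tracer above: wrap an allocator, allocate through the wrapper, query and dump the bookkeeping, then tear it down. The declarations are assumed to live in aws/common/allocator.h (header placement is not part of this diff); the second argument of aws_mem_tracer_new is the deprecated bookkeeping-allocator parameter and is ignored.

    #include <aws/common/allocator.h>
    #include <stdio.h>

    int main(void) {
        /* Wrap the default allocator, keeping up to 8 frames per unique stack. */
        struct aws_allocator *traced =
            aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_STACKS, 8);

        void *leak = aws_mem_acquire(traced, 128); /* tracked allocation */
        (void)leak;                                /* deliberately not released */

        printf("outstanding: %zu bytes in %zu allocations\n",
               aws_mem_tracer_bytes(traced),
               aws_mem_tracer_count(traced));

        /* Emits the "MEMTRACE DUMP" report through AWS_LOGF_TRACE; a logger at
         * TRACE level must be installed for the output to appear. */
        aws_mem_tracer_dump(traced);

        /* Returns the wrapped allocator; the tracer block itself is freed. */
        struct aws_allocator *original = aws_mem_tracer_destroy(traced);
        (void)original;
        return 0;
    }
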
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/clock.c b/contrib/restricted/aws/aws-c-common/source/posix/clock.c
index 2e2b43f266..90e213ea7c 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/clock.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/clock.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/clock.h>
@@ -35,9 +35,9 @@ static int s_legacy_get_time(uint64_t *timestamp) {
return aws_raise_error(AWS_ERROR_CLOCK_FAILURE);
}
- uint64_t secs = (uint64_t)tv.tv_sec;
- uint64_t u_secs = (uint64_t)tv.tv_usec;
- *timestamp = (secs * NS_PER_SEC) + (u_secs * 1000);
+ uint64_t secs = (uint64_t)tv.tv_sec;
+ uint64_t u_secs = (uint64_t)tv.tv_usec;
+ *timestamp = (secs * NS_PER_SEC) + (u_secs * 1000);
return AWS_OP_SUCCESS;
}
@@ -45,13 +45,13 @@ static int s_legacy_get_time(uint64_t *timestamp) {
static aws_thread_once s_thread_once_flag = AWS_THREAD_ONCE_STATIC_INIT;
static int (*s_gettime_fn)(clockid_t __clock_id, struct timespec *__tp) = NULL;
-static void s_do_osx_loads(void *user_data) {
- (void)user_data;
+static void s_do_osx_loads(void *user_data) {
+ (void)user_data;
s_gettime_fn = (int (*)(clockid_t __clock_id, struct timespec * __tp)) dlsym(RTLD_DEFAULT, "clock_gettime");
}
int aws_high_res_clock_get_ticks(uint64_t *timestamp) {
- aws_thread_call_once(&s_thread_once_flag, s_do_osx_loads, NULL);
+ aws_thread_call_once(&s_thread_once_flag, s_do_osx_loads, NULL);
int ret_val = 0;
if (s_gettime_fn) {
@@ -62,9 +62,9 @@ int aws_high_res_clock_get_ticks(uint64_t *timestamp) {
return aws_raise_error(AWS_ERROR_CLOCK_FAILURE);
}
- uint64_t secs = (uint64_t)ts.tv_sec;
- uint64_t n_secs = (uint64_t)ts.tv_nsec;
- *timestamp = (secs * NS_PER_SEC) + n_secs;
+ uint64_t secs = (uint64_t)ts.tv_sec;
+ uint64_t n_secs = (uint64_t)ts.tv_nsec;
+ *timestamp = (secs * NS_PER_SEC) + n_secs;
return AWS_OP_SUCCESS;
}
@@ -72,7 +72,7 @@ int aws_high_res_clock_get_ticks(uint64_t *timestamp) {
}
int aws_sys_clock_get_ticks(uint64_t *timestamp) {
- aws_thread_call_once(&s_thread_once_flag, s_do_osx_loads, NULL);
+ aws_thread_call_once(&s_thread_once_flag, s_do_osx_loads, NULL);
int ret_val = 0;
if (s_gettime_fn) {
@@ -82,9 +82,9 @@ int aws_sys_clock_get_ticks(uint64_t *timestamp) {
return aws_raise_error(AWS_ERROR_CLOCK_FAILURE);
}
- uint64_t secs = (uint64_t)ts.tv_sec;
- uint64_t n_secs = (uint64_t)ts.tv_nsec;
- *timestamp = (secs * NS_PER_SEC) + n_secs;
+ uint64_t secs = (uint64_t)ts.tv_sec;
+ uint64_t n_secs = (uint64_t)ts.tv_nsec;
+ *timestamp = (secs * NS_PER_SEC) + n_secs;
return AWS_OP_SUCCESS;
}
return s_legacy_get_time(timestamp);
@@ -112,9 +112,9 @@ int aws_high_res_clock_get_ticks(uint64_t *timestamp) {
return aws_raise_error(AWS_ERROR_CLOCK_FAILURE);
}
- uint64_t secs = (uint64_t)ts.tv_sec;
- uint64_t n_secs = (uint64_t)ts.tv_nsec;
- *timestamp = (secs * NS_PER_SEC) + n_secs;
+ uint64_t secs = (uint64_t)ts.tv_sec;
+ uint64_t n_secs = (uint64_t)ts.tv_nsec;
+ *timestamp = (secs * NS_PER_SEC) + n_secs;
return AWS_OP_SUCCESS;
}
@@ -127,10 +127,10 @@ int aws_sys_clock_get_ticks(uint64_t *timestamp) {
return aws_raise_error(AWS_ERROR_CLOCK_FAILURE);
}
- uint64_t secs = (uint64_t)ts.tv_sec;
- uint64_t n_secs = (uint64_t)ts.tv_nsec;
- *timestamp = (secs * NS_PER_SEC) + n_secs;
-
+ uint64_t secs = (uint64_t)ts.tv_sec;
+ uint64_t n_secs = (uint64_t)ts.tv_nsec;
+ *timestamp = (secs * NS_PER_SEC) + n_secs;
+
return AWS_OP_SUCCESS;
}
#endif /* defined(__MACH__) */
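
A small sketch of the two public clock entry points above: each writes a nanosecond tick count through its out-parameter and returns AWS_OP_SUCCESS on success.

    #include <aws/common/clock.h>
    #include <stdio.h>

    int main(void) {
        uint64_t monotonic_ns = 0;
        uint64_t wall_ns = 0;

        /* Monotonic clock, suitable for measuring elapsed time. */
        if (aws_high_res_clock_get_ticks(&monotonic_ns)) {
            return 1;
        }

        /* System (wall) clock, nanoseconds since the epoch. */
        if (aws_sys_clock_get_ticks(&wall_ns)) {
            return 1;
        }

        printf("high-res: %llu ns, sys: %llu ns\n",
               (unsigned long long)monotonic_ns,
               (unsigned long long)wall_ns);
        return 0;
    }
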
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/condition_variable.c b/contrib/restricted/aws/aws-c-common/source/posix/condition_variable.c
index b45fc7382d..ca321c6bfa 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/condition_variable.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/condition_variable.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/condition_variable.h>
@@ -22,30 +22,30 @@ static int process_error_code(int err) {
}
int aws_condition_variable_init(struct aws_condition_variable *condition_variable) {
- AWS_PRECONDITION(condition_variable);
-
+ AWS_PRECONDITION(condition_variable);
+
if (pthread_cond_init(&condition_variable->condition_handle, NULL)) {
- AWS_ZERO_STRUCT(*condition_variable);
+ AWS_ZERO_STRUCT(*condition_variable);
return aws_raise_error(AWS_ERROR_COND_VARIABLE_INIT_FAILED);
}
- condition_variable->initialized = true;
+ condition_variable->initialized = true;
return AWS_OP_SUCCESS;
}
void aws_condition_variable_clean_up(struct aws_condition_variable *condition_variable) {
- AWS_PRECONDITION(condition_variable);
-
- if (condition_variable->initialized) {
- pthread_cond_destroy(&condition_variable->condition_handle);
- }
-
- AWS_ZERO_STRUCT(*condition_variable);
+ AWS_PRECONDITION(condition_variable);
+
+ if (condition_variable->initialized) {
+ pthread_cond_destroy(&condition_variable->condition_handle);
+ }
+
+ AWS_ZERO_STRUCT(*condition_variable);
}
int aws_condition_variable_notify_one(struct aws_condition_variable *condition_variable) {
- AWS_PRECONDITION(condition_variable && condition_variable->initialized);
-
+ AWS_PRECONDITION(condition_variable && condition_variable->initialized);
+
int err_code = pthread_cond_signal(&condition_variable->condition_handle);
if (err_code) {
@@ -56,8 +56,8 @@ int aws_condition_variable_notify_one(struct aws_condition_variable *condition_v
}
int aws_condition_variable_notify_all(struct aws_condition_variable *condition_variable) {
- AWS_PRECONDITION(condition_variable && condition_variable->initialized);
-
+ AWS_PRECONDITION(condition_variable && condition_variable->initialized);
+
int err_code = pthread_cond_broadcast(&condition_variable->condition_handle);
if (err_code) {
@@ -68,9 +68,9 @@ int aws_condition_variable_notify_all(struct aws_condition_variable *condition_v
}
int aws_condition_variable_wait(struct aws_condition_variable *condition_variable, struct aws_mutex *mutex) {
- AWS_PRECONDITION(condition_variable && condition_variable->initialized);
- AWS_PRECONDITION(mutex && mutex->initialized);
-
+ AWS_PRECONDITION(condition_variable && condition_variable->initialized);
+ AWS_PRECONDITION(mutex && mutex->initialized);
+
int err_code = pthread_cond_wait(&condition_variable->condition_handle, &mutex->mutex_handle);
if (err_code) {
@@ -85,9 +85,9 @@ int aws_condition_variable_wait_for(
struct aws_mutex *mutex,
int64_t time_to_wait) {
- AWS_PRECONDITION(condition_variable && condition_variable->initialized);
- AWS_PRECONDITION(mutex && mutex->initialized);
-
+ AWS_PRECONDITION(condition_variable && condition_variable->initialized);
+ AWS_PRECONDITION(mutex && mutex->initialized);
+
uint64_t current_sys_time = 0;
if (aws_sys_clock_get_ticks(&current_sys_time)) {
return AWS_OP_ERR;
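
For orientation, a sketch of the usual wait-with-predicate pattern built from the functions above together with aws_mutex; thread creation is omitted, only the locking and notification choreography is shown.

    #include <aws/common/condition_variable.h>
    #include <aws/common/mutex.h>
    #include <stdbool.h>

    struct ready_signal {
        struct aws_mutex lock;
        struct aws_condition_variable cvar;
        bool ready;
    };

    int ready_signal_init(struct ready_signal *sig) {
        sig->ready = false;
        if (aws_mutex_init(&sig->lock)) {
            return AWS_OP_ERR;
        }
        if (aws_condition_variable_init(&sig->cvar)) {
            aws_mutex_clean_up(&sig->lock);
            return AWS_OP_ERR;
        }
        return AWS_OP_SUCCESS;
    }

    /* Called from the producing thread. */
    void ready_signal_set(struct ready_signal *sig) {
        aws_mutex_lock(&sig->lock);
        sig->ready = true;
        aws_mutex_unlock(&sig->lock);
        aws_condition_variable_notify_one(&sig->cvar);
    }

    /* Called from the consuming thread; the loop guards against spurious wakeups. */
    void ready_signal_wait(struct ready_signal *sig) {
        aws_mutex_lock(&sig->lock);
        while (!sig->ready) {
            aws_condition_variable_wait(&sig->cvar, &sig->lock);
        }
        aws_mutex_unlock(&sig->lock);
    }
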
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/device_random.c b/contrib/restricted/aws/aws-c-common/source/posix/device_random.c
index e60edc7e20..f446002231 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/device_random.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/device_random.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/device_random.h>
@@ -18,8 +18,8 @@ static aws_thread_once s_rand_init = AWS_THREAD_ONCE_STATIC_INIT;
#else
# define OPEN_FLAGS (O_RDONLY)
#endif
-static void s_init_rand(void *user_data) {
- (void)user_data;
+static void s_init_rand(void *user_data) {
+ (void)user_data;
s_rand_fd = open("/dev/urandom", OPEN_FLAGS);
if (s_rand_fd == -1) {
@@ -37,7 +37,7 @@ static void s_init_rand(void *user_data) {
static int s_fallback_device_random_buffer(struct aws_byte_buf *output) {
- aws_thread_call_once(&s_rand_init, s_init_rand, NULL);
+ aws_thread_call_once(&s_rand_init, s_init_rand, NULL);
size_t diff = output->capacity - output->len;
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/environment.c b/contrib/restricted/aws/aws-c-common/source/posix/environment.c
index 2fd670eaf6..f4b69caea2 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/environment.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/environment.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/environment.h>
@@ -13,7 +13,7 @@ int aws_get_environment_value(
const struct aws_string *variable_name,
struct aws_string **value_out) {
- const char *value = getenv(aws_string_c_str(variable_name));
+ const char *value = getenv(aws_string_c_str(variable_name));
if (value == NULL) {
*value_out = NULL;
return AWS_OP_SUCCESS;
@@ -29,7 +29,7 @@ int aws_get_environment_value(
int aws_set_environment_value(const struct aws_string *variable_name, const struct aws_string *value) {
- if (setenv(aws_string_c_str(variable_name), aws_string_c_str(value), 1) != 0) {
+ if (setenv(aws_string_c_str(variable_name), aws_string_c_str(value), 1) != 0) {
return aws_raise_error(AWS_ERROR_ENVIRONMENT_SET);
}
@@ -37,7 +37,7 @@ int aws_set_environment_value(const struct aws_string *variable_name, const stru
}
int aws_unset_environment_value(const struct aws_string *variable_name) {
- if (unsetenv(aws_string_c_str(variable_name)) != 0) {
+ if (unsetenv(aws_string_c_str(variable_name)) != 0) {
return aws_raise_error(AWS_ERROR_ENVIRONMENT_UNSET);
}
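
A sketch of setting and then clearing a process environment variable with the helpers above. The aws_string constructor used here (aws_string_new_from_c_str) is an assumption taken from the library's string API; it does not appear in this diff.

    #include <aws/common/environment.h>
    #include <aws/common/string.h>

    int set_and_clear_example(struct aws_allocator *allocator) {
        struct aws_string *name = aws_string_new_from_c_str(allocator, "MY_APP_MODE");
        if (!name) {
            return AWS_OP_ERR;
        }
        struct aws_string *value = aws_string_new_from_c_str(allocator, "debug");
        if (!value) {
            aws_string_destroy(name);
            return AWS_OP_ERR;
        }

        int result = AWS_OP_SUCCESS;
        /* Failure raises AWS_ERROR_ENVIRONMENT_SET / AWS_ERROR_ENVIRONMENT_UNSET. */
        if (aws_set_environment_value(name, value) || aws_unset_environment_value(name)) {
            result = AWS_OP_ERR;
        }

        aws_string_destroy(value);
        aws_string_destroy(name);
        return result;
    }
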
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/mutex.c b/contrib/restricted/aws/aws-c-common/source/posix/mutex.c
index bc3a94af28..2cbf2db66c 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/mutex.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/mutex.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/mutex.h>
@@ -9,15 +9,15 @@
#include <errno.h>
void aws_mutex_clean_up(struct aws_mutex *mutex) {
- AWS_PRECONDITION(mutex);
- if (mutex->initialized) {
- pthread_mutex_destroy(&mutex->mutex_handle);
- }
- AWS_ZERO_STRUCT(*mutex);
+ AWS_PRECONDITION(mutex);
+ if (mutex->initialized) {
+ pthread_mutex_destroy(&mutex->mutex_handle);
+ }
+ AWS_ZERO_STRUCT(*mutex);
}
int aws_mutex_init(struct aws_mutex *mutex) {
- AWS_PRECONDITION(mutex);
+ AWS_PRECONDITION(mutex);
pthread_mutexattr_t attr;
int err_code = pthread_mutexattr_init(&attr);
int return_code = AWS_OP_SUCCESS;
@@ -33,21 +33,21 @@ int aws_mutex_init(struct aws_mutex *mutex) {
return_code = aws_private_convert_and_raise_error_code(err_code);
}
- mutex->initialized = (return_code == AWS_OP_SUCCESS);
+ mutex->initialized = (return_code == AWS_OP_SUCCESS);
return return_code;
}
int aws_mutex_lock(struct aws_mutex *mutex) {
- AWS_PRECONDITION(mutex && mutex->initialized);
+ AWS_PRECONDITION(mutex && mutex->initialized);
return aws_private_convert_and_raise_error_code(pthread_mutex_lock(&mutex->mutex_handle));
}
int aws_mutex_try_lock(struct aws_mutex *mutex) {
- AWS_PRECONDITION(mutex && mutex->initialized);
+ AWS_PRECONDITION(mutex && mutex->initialized);
return aws_private_convert_and_raise_error_code(pthread_mutex_trylock(&mutex->mutex_handle));
}
int aws_mutex_unlock(struct aws_mutex *mutex) {
- AWS_PRECONDITION(mutex && mutex->initialized);
+ AWS_PRECONDITION(mutex && mutex->initialized);
return aws_private_convert_and_raise_error_code(pthread_mutex_unlock(&mutex->mutex_handle));
}
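
A short sketch of the non-blocking path above: aws_mutex_try_lock returns an error instead of waiting when the lock is already held, so the caller can fall back to other work.

    #include <aws/common/mutex.h>
    #include <stdbool.h>

    /* Attempt the critical section without blocking; report whether it ran. */
    bool try_do_work(struct aws_mutex *lock) {
        if (aws_mutex_try_lock(lock) != AWS_OP_SUCCESS) {
            return false; /* another thread holds the lock; retry later */
        }

        /* ... critical section ... */

        aws_mutex_unlock(lock);
        return true;
    }
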
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/process.c b/contrib/restricted/aws/aws-c-common/source/posix/process.c
index 217ccb7d32..fb808faeb6 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/process.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/process.c
@@ -1,53 +1,53 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/process.h>
-
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <unistd.h>
-
-int aws_get_pid(void) {
- return (int)getpid();
-}
-
-size_t aws_get_soft_limit_io_handles(void) {
- struct rlimit rlimit;
- AWS_ZERO_STRUCT(rlimit);
-
- AWS_FATAL_ASSERT(
- !getrlimit(RLIMIT_NOFILE, &rlimit) &&
- "getrlimit() should never fail for RLIMIT_NOFILE regardless of user permissions");
- return rlimit.rlim_cur;
-}
-
-size_t aws_get_hard_limit_io_handles(void) {
- struct rlimit rlimit;
- AWS_ZERO_STRUCT(rlimit);
-
- AWS_FATAL_ASSERT(
- !getrlimit(RLIMIT_NOFILE, &rlimit) &&
- "getrlimit() should never fail for RLIMIT_NOFILE regardless of user permissions");
- return rlimit.rlim_max;
-}
-
-int aws_set_soft_limit_io_handles(size_t max_handles) {
- size_t hard_limit = aws_get_hard_limit_io_handles();
-
- if (max_handles > hard_limit) {
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- struct rlimit rlimit = {
- .rlim_cur = max_handles,
- .rlim_max = hard_limit,
- };
-
- if (setrlimit(RLIMIT_NOFILE, &rlimit)) {
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- return AWS_OP_SUCCESS;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/process.h>
+
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+int aws_get_pid(void) {
+ return (int)getpid();
+}
+
+size_t aws_get_soft_limit_io_handles(void) {
+ struct rlimit rlimit;
+ AWS_ZERO_STRUCT(rlimit);
+
+ AWS_FATAL_ASSERT(
+ !getrlimit(RLIMIT_NOFILE, &rlimit) &&
+ "getrlimit() should never fail for RLIMIT_NOFILE regardless of user permissions");
+ return rlimit.rlim_cur;
+}
+
+size_t aws_get_hard_limit_io_handles(void) {
+ struct rlimit rlimit;
+ AWS_ZERO_STRUCT(rlimit);
+
+ AWS_FATAL_ASSERT(
+ !getrlimit(RLIMIT_NOFILE, &rlimit) &&
+ "getrlimit() should never fail for RLIMIT_NOFILE regardless of user permissions");
+ return rlimit.rlim_max;
+}
+
+int aws_set_soft_limit_io_handles(size_t max_handles) {
+ size_t hard_limit = aws_get_hard_limit_io_handles();
+
+ if (max_handles > hard_limit) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct rlimit rlimit = {
+ .rlim_cur = max_handles,
+ .rlim_max = hard_limit,
+ };
+
+ if (setrlimit(RLIMIT_NOFILE, &rlimit)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ return AWS_OP_SUCCESS;
+}
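
A sketch of raising the soft file-descriptor limit with the helpers above; values above the hard limit are rejected with AWS_ERROR_INVALID_ARGUMENT, so the hard limit is the natural ceiling to request.

    #include <aws/common/process.h>
    #include <stdio.h>

    int main(void) {
        size_t soft = aws_get_soft_limit_io_handles();
        size_t hard = aws_get_hard_limit_io_handles();
        printf("pid=%d soft=%zu hard=%zu\n", aws_get_pid(), soft, hard);

        /* Raise the soft limit as far as the hard limit allows. */
        if (aws_set_soft_limit_io_handles(hard) != AWS_OP_SUCCESS) {
            return 1;
        }
        return 0;
    }
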
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/rw_lock.c b/contrib/restricted/aws/aws-c-common/source/posix/rw_lock.c
index e814ea8ebf..824477d6cf 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/rw_lock.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/rw_lock.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/atomics.h>
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/system_info.c b/contrib/restricted/aws/aws-c-common/source/posix/system_info.c
index 26faefc35e..1311be4096 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/system_info.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/system_info.c
@@ -1,14 +1,14 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/system_info.h>
-#include <aws/common/byte_buf.h>
-#include <aws/common/logging.h>
-#include <aws/common/platform.h>
-
+#include <aws/common/byte_buf.h>
+#include <aws/common/logging.h>
+#include <aws/common/platform.h>
+
#if defined(__FreeBSD__) || defined(__NetBSD__)
# define __BSD_VISIBLE 1
#endif
@@ -22,13 +22,13 @@ size_t aws_system_info_processor_count(void) {
return (size_t)nprocs;
}
- AWS_FATAL_POSTCONDITION(nprocs >= 0);
+ AWS_FATAL_POSTCONDITION(nprocs >= 0);
return 0;
}
#else
size_t aws_system_info_processor_count(void) {
# if defined(AWS_NUM_CPU_CORES)
- AWS_FATAL_PRECONDITION(AWS_NUM_CPU_CORES > 0);
+ AWS_FATAL_PRECONDITION(AWS_NUM_CPU_CORES > 0);
return AWS_NUM_CPU_CORES;
# else
return 1;
@@ -36,42 +36,42 @@ size_t aws_system_info_processor_count(void) {
}
#endif
-#include <ctype.h>
-#include <fcntl.h>
-
-bool aws_is_debugger_present(void) {
- /* Open the status file */
- const int status_fd = open("/proc/self/status", O_RDONLY);
- if (status_fd == -1) {
- return false;
- }
-
- /* Read its contents */
- char buf[4096];
- const ssize_t num_read = read(status_fd, buf, sizeof(buf) - 1);
- close(status_fd);
- if (num_read <= 0) {
- return false;
- }
- buf[num_read] = '\0';
-
- /* Search for the TracerPid field, which will indicate the debugger process */
- const char tracerPidString[] = "TracerPid:";
- const char *tracer_pid = strstr(buf, tracerPidString);
- if (!tracer_pid) {
- return false;
- }
-
- /* If it's not 0, then there's a debugger */
- for (const char *cur = tracer_pid + sizeof(tracerPidString) - 1; cur <= buf + num_read; ++cur) {
- if (!aws_isspace(*cur)) {
- return aws_isdigit(*cur) && *cur != '0';
- }
- }
-
- return false;
-}
-
+#include <ctype.h>
+#include <fcntl.h>
+
+bool aws_is_debugger_present(void) {
+ /* Open the status file */
+ const int status_fd = open("/proc/self/status", O_RDONLY);
+ if (status_fd == -1) {
+ return false;
+ }
+
+ /* Read its contents */
+ char buf[4096];
+ const ssize_t num_read = read(status_fd, buf, sizeof(buf) - 1);
+ close(status_fd);
+ if (num_read <= 0) {
+ return false;
+ }
+ buf[num_read] = '\0';
+
+ /* Search for the TracerPid field, which will indicate the debugger process */
+ const char tracerPidString[] = "TracerPid:";
+ const char *tracer_pid = strstr(buf, tracerPidString);
+ if (!tracer_pid) {
+ return false;
+ }
+
+ /* If it's not 0, then there's a debugger */
+ for (const char *cur = tracer_pid + sizeof(tracerPidString) - 1; cur <= buf + num_read; ++cur) {
+ if (!aws_isspace(*cur)) {
+ return aws_isdigit(*cur) && *cur != '0';
+ }
+ }
+
+ return false;
+}
+
#include <signal.h>
#ifndef __has_builtin
@@ -80,13 +80,13 @@ bool aws_is_debugger_present(void) {
void aws_debug_break(void) {
#ifdef DEBUG_BUILD
- if (aws_is_debugger_present()) {
+ if (aws_is_debugger_present()) {
# if __has_builtin(__builtin_debugtrap)
- __builtin_debugtrap();
+ __builtin_debugtrap();
# else
- raise(SIGTRAP);
+ raise(SIGTRAP);
# endif
- }
+ }
#endif /* DEBUG_BUILD */
}
@@ -103,22 +103,22 @@ struct aws_stack_frame_info {
char function[128];
};
-/* Ensure only safe characters in a path buffer in case someone tries to
- rename the exe and trigger shell execution via the sub commands used to
- resolve symbols */
-char *s_whitelist_chars(char *path) {
- char *cur = path;
- while (*cur) {
- bool whitelisted = aws_isalnum(*cur) || aws_isspace(*cur) || *cur == '/' || *cur == '_' || *cur == '.' ||
- (cur > path && *cur == '-');
- if (!whitelisted) {
- *cur = '_';
- }
- ++cur;
- }
- return path;
-}
-
+/* Ensure only safe characters in a path buffer in case someone tries to
+ rename the exe and trigger shell execution via the sub commands used to
+ resolve symbols */
+char *s_whitelist_chars(char *path) {
+ char *cur = path;
+ while (*cur) {
+ bool whitelisted = aws_isalnum(*cur) || aws_isspace(*cur) || *cur == '/' || *cur == '_' || *cur == '.' ||
+ (cur > path && *cur == '-');
+ if (!whitelisted) {
+ *cur = '_';
+ }
+ ++cur;
+ }
+ return path;
+}
+
# if defined(__APPLE__)
# include <ctype.h>
# include <dlfcn.h>
@@ -141,16 +141,16 @@ int s_parse_symbol(const char *symbol, void *addr, struct aws_stack_frame_info *
const char *current_exe = s_get_executable_path();
/* parse exe/shared lib */
const char *exe_start = strstr(symbol, " ");
- while (aws_isspace(*exe_start)) {
+ while (aws_isspace(*exe_start)) {
++exe_start;
- }
+ }
const char *exe_end = strstr(exe_start, " ");
strncpy(frame->exe, exe_start, exe_end - exe_start);
/* executables get basename'd, so restore the path */
if (strstr(current_exe, frame->exe)) {
strncpy(frame->exe, current_exe, strlen(current_exe));
}
- s_whitelist_chars(frame->exe);
+ s_whitelist_chars(frame->exe);
/* parse addr */
const char *addr_start = strstr(exe_end, "0x");
@@ -160,11 +160,11 @@ int s_parse_symbol(const char *symbol, void *addr, struct aws_stack_frame_info *
/* parse function */
const char *function_start = strstr(addr_end, " ") + 1;
const char *function_end = strstr(function_start, " ");
- /* truncate function name if needed */
- size_t function_len = function_end - function_start;
- if (function_len >= (sizeof(frame->function) - 1)) {
- function_len = sizeof(frame->function) - 1;
- }
+ /* truncate function name if needed */
+ size_t function_len = function_end - function_start;
+ if (function_len >= (sizeof(frame->function) - 1)) {
+ function_len = sizeof(frame->function) - 1;
+ }
strncpy(frame->function, function_start, function_end - function_start);
/* find base addr for library/exe */
@@ -180,9 +180,9 @@ void s_resolve_cmd(char *cmd, size_t len, struct aws_stack_frame_info *frame) {
}
# else
int s_parse_symbol(const char *symbol, void *addr, struct aws_stack_frame_info *frame) {
- /* symbols look like: <exe-or-shared-lib>(<function>+<addr>) [0x<addr>]
+ /* symbols look like: <exe-or-shared-lib>(<function>+<addr>) [0x<addr>]
* or: <exe-or-shared-lib> [0x<addr>]
- * or: [0x<addr>]
+ * or: [0x<addr>]
*/
(void)addr;
const char *open_paren = strstr(symbol, "(");
@@ -190,45 +190,45 @@ int s_parse_symbol(const char *symbol, void *addr, struct aws_stack_frame_info *
const char *exe_end = open_paren;
/* there may not be a function in parens, or parens at all */
if (open_paren == NULL || close_paren == NULL) {
- exe_end = strstr(symbol, "[");
+ exe_end = strstr(symbol, "[");
if (!exe_end) {
return AWS_OP_ERR;
}
- /* if exe_end == symbol, there's no exe */
- if (exe_end != symbol) {
- exe_end -= 1;
- }
+ /* if exe_end == symbol, there's no exe */
+ if (exe_end != symbol) {
+ exe_end -= 1;
+ }
}
ptrdiff_t exe_len = exe_end - symbol;
- if (exe_len > 0) {
- strncpy(frame->exe, symbol, exe_len);
+ if (exe_len > 0) {
+ strncpy(frame->exe, symbol, exe_len);
}
- s_whitelist_chars(frame->exe);
+ s_whitelist_chars(frame->exe);
long function_len = (open_paren && close_paren) ? close_paren - open_paren - 1 : 0;
if (function_len > 0) { /* dynamic symbol was found */
- /* there might be (<function>+<addr>) or just (<function>) */
- const char *function_start = open_paren + 1;
- const char *plus = strstr(function_start, "+");
- const char *function_end = (plus) ? plus : close_paren;
- if (function_end > function_start) {
- function_len = function_end - function_start;
- strncpy(frame->function, function_start, function_len);
- } else if (plus) {
- long addr_len = close_paren - plus - 1;
- strncpy(frame->addr, plus + 1, addr_len);
- }
+ /* there might be (<function>+<addr>) or just (<function>) */
+ const char *function_start = open_paren + 1;
+ const char *plus = strstr(function_start, "+");
+ const char *function_end = (plus) ? plus : close_paren;
+ if (function_end > function_start) {
+ function_len = function_end - function_start;
+ strncpy(frame->function, function_start, function_len);
+ } else if (plus) {
+ long addr_len = close_paren - plus - 1;
+ strncpy(frame->addr, plus + 1, addr_len);
+ }
+ }
+ if (frame->addr[0] == 0) {
+ /* use the address in []'s, since it's all we have */
+ const char *addr_start = strstr(exe_end, "[") + 1;
+ char *addr_end = strstr(addr_start, "]");
+ if (!addr_end) {
+ return AWS_OP_ERR;
+ }
+ strncpy(frame->addr, addr_start, addr_end - addr_start);
}
- if (frame->addr[0] == 0) {
- /* use the address in []'s, since it's all we have */
- const char *addr_start = strstr(exe_end, "[") + 1;
- char *addr_end = strstr(addr_start, "]");
- if (!addr_end) {
- return AWS_OP_ERR;
- }
- strncpy(frame->addr, addr_start, addr_end - addr_start);
- }
return AWS_OP_SUCCESS;
}
@@ -237,63 +237,63 @@ void s_resolve_cmd(char *cmd, size_t len, struct aws_stack_frame_info *frame) {
}
# endif
-size_t aws_backtrace(void **stack_frames, size_t num_frames) {
- return backtrace(stack_frames, (int)aws_min_size(num_frames, INT_MAX));
-}
-
-char **aws_backtrace_symbols(void *const *stack_frames, size_t stack_depth) {
- return backtrace_symbols(stack_frames, (int)aws_min_size(stack_depth, INT_MAX));
-}
-
-char **aws_backtrace_addr2line(void *const *stack_frames, size_t stack_depth) {
- char **symbols = aws_backtrace_symbols(stack_frames, stack_depth);
- AWS_FATAL_ASSERT(symbols);
- struct aws_byte_buf lines;
- aws_byte_buf_init(&lines, aws_default_allocator(), stack_depth * 256);
-
- /* insert pointers for each stack entry */
- memset(lines.buffer, 0, stack_depth * sizeof(void *));
- lines.len += stack_depth * sizeof(void *);
-
- /* symbols look like: <exe-or-shared-lib>(<function>+<addr>) [0x<addr>]
- * or: <exe-or-shared-lib> [0x<addr>]
-     * every frame is resolved below, including frame 0 (this function) */
- for (size_t frame_idx = 0; frame_idx < stack_depth; ++frame_idx) {
- struct aws_stack_frame_info frame;
- AWS_ZERO_STRUCT(frame);
- const char *symbol = symbols[frame_idx];
- if (s_parse_symbol(symbol, stack_frames[frame_idx], &frame)) {
- goto parse_failed;
- }
-
- /* TODO: Emulate libunwind */
- char cmd[sizeof(struct aws_stack_frame_info)] = {0};
- s_resolve_cmd(cmd, sizeof(cmd), &frame);
- FILE *out = popen(cmd, "r");
- if (!out) {
- goto parse_failed;
- }
- char output[1024];
- if (fgets(output, sizeof(output), out)) {
- /* if addr2line or atos don't know what to do with an address, they just echo it */
- /* if there are spaces in the output, then they resolved something */
- if (strstr(output, " ")) {
- symbol = output;
- }
- }
- pclose(out);
-
- parse_failed:
- /* record the pointer to where the symbol will be */
- *((char **)&lines.buffer[frame_idx * sizeof(void *)]) = (char *)lines.buffer + lines.len;
- struct aws_byte_cursor line_cursor = aws_byte_cursor_from_c_str(symbol);
- line_cursor.len += 1; /* strings must be null terminated, make sure we copy the null */
- aws_byte_buf_append_dynamic(&lines, &line_cursor);
- }
- free(symbols);
- return (char **)lines.buffer; /* caller is responsible for freeing */
-}
-
+size_t aws_backtrace(void **stack_frames, size_t num_frames) {
+ return backtrace(stack_frames, (int)aws_min_size(num_frames, INT_MAX));
+}
+
+char **aws_backtrace_symbols(void *const *stack_frames, size_t stack_depth) {
+ return backtrace_symbols(stack_frames, (int)aws_min_size(stack_depth, INT_MAX));
+}
+
+char **aws_backtrace_addr2line(void *const *stack_frames, size_t stack_depth) {
+ char **symbols = aws_backtrace_symbols(stack_frames, stack_depth);
+ AWS_FATAL_ASSERT(symbols);
+ struct aws_byte_buf lines;
+ aws_byte_buf_init(&lines, aws_default_allocator(), stack_depth * 256);
+
+ /* insert pointers for each stack entry */
+ memset(lines.buffer, 0, stack_depth * sizeof(void *));
+ lines.len += stack_depth * sizeof(void *);
+
+ /* symbols look like: <exe-or-shared-lib>(<function>+<addr>) [0x<addr>]
+ * or: <exe-or-shared-lib> [0x<addr>]
+     * every frame is resolved below, including frame 0 (this function) */
+ for (size_t frame_idx = 0; frame_idx < stack_depth; ++frame_idx) {
+ struct aws_stack_frame_info frame;
+ AWS_ZERO_STRUCT(frame);
+ const char *symbol = symbols[frame_idx];
+ if (s_parse_symbol(symbol, stack_frames[frame_idx], &frame)) {
+ goto parse_failed;
+ }
+
+ /* TODO: Emulate libunwind */
+ char cmd[sizeof(struct aws_stack_frame_info)] = {0};
+ s_resolve_cmd(cmd, sizeof(cmd), &frame);
+ FILE *out = popen(cmd, "r");
+ if (!out) {
+ goto parse_failed;
+ }
+ char output[1024];
+ if (fgets(output, sizeof(output), out)) {
+ /* if addr2line or atos don't know what to do with an address, they just echo it */
+ /* if there are spaces in the output, then they resolved something */
+ if (strstr(output, " ")) {
+ symbol = output;
+ }
+ }
+ pclose(out);
+
+ parse_failed:
+ /* record the pointer to where the symbol will be */
+ *((char **)&lines.buffer[frame_idx * sizeof(void *)]) = (char *)lines.buffer + lines.len;
+ struct aws_byte_cursor line_cursor = aws_byte_cursor_from_c_str(symbol);
+ line_cursor.len += 1; /* strings must be null terminated, make sure we copy the null */
+ aws_byte_buf_append_dynamic(&lines, &line_cursor);
+ }
+ free(symbols);
+ return (char **)lines.buffer; /* caller is responsible for freeing */
+}
+
void aws_backtrace_print(FILE *fp, void *call_site_data) {
siginfo_t *siginfo = call_site_data;
if (siginfo) {
@@ -304,21 +304,21 @@ void aws_backtrace_print(FILE *fp, void *call_site_data) {
}
void *stack_frames[AWS_BACKTRACE_DEPTH];
- size_t stack_depth = aws_backtrace(stack_frames, AWS_BACKTRACE_DEPTH);
- char **symbols = aws_backtrace_symbols(stack_frames, stack_depth);
+ size_t stack_depth = aws_backtrace(stack_frames, AWS_BACKTRACE_DEPTH);
+ char **symbols = aws_backtrace_symbols(stack_frames, stack_depth);
if (symbols == NULL) {
fprintf(fp, "Unable to decode backtrace via backtrace_symbols\n");
return;
}
- fprintf(fp, "################################################################################\n");
- fprintf(fp, "Resolved stacktrace:\n");
- fprintf(fp, "################################################################################\n");
- /* symbols look like: <exe-or-shared-lib>(<function>+<addr>) [0x<addr>]
+ fprintf(fp, "################################################################################\n");
+ fprintf(fp, "Resolved stacktrace:\n");
+ fprintf(fp, "################################################################################\n");
+ /* symbols look like: <exe-or-shared-lib>(<function>+<addr>) [0x<addr>]
* or: <exe-or-shared-lib> [0x<addr>]
- * or: [0x<addr>]
+ * or: [0x<addr>]
* start at 1 to skip the current frame (this function) */
- for (size_t frame_idx = 1; frame_idx < stack_depth; ++frame_idx) {
+ for (size_t frame_idx = 1; frame_idx < stack_depth; ++frame_idx) {
struct aws_stack_frame_info frame;
AWS_ZERO_STRUCT(frame);
const char *symbol = symbols[frame_idx];
@@ -346,64 +346,64 @@ void aws_backtrace_print(FILE *fp, void *call_site_data) {
parse_failed:
fprintf(fp, "%s%s", symbol, (symbol == symbols[frame_idx]) ? "\n" : "");
}
-
- fprintf(fp, "################################################################################\n");
- fprintf(fp, "Raw stacktrace:\n");
- fprintf(fp, "################################################################################\n");
- for (size_t frame_idx = 1; frame_idx < stack_depth; ++frame_idx) {
- const char *symbol = symbols[frame_idx];
- fprintf(fp, "%s\n", symbol);
- }
- fflush(fp);
-
+
+ fprintf(fp, "################################################################################\n");
+ fprintf(fp, "Raw stacktrace:\n");
+ fprintf(fp, "################################################################################\n");
+ for (size_t frame_idx = 1; frame_idx < stack_depth; ++frame_idx) {
+ const char *symbol = symbols[frame_idx];
+ fprintf(fp, "%s\n", symbol);
+ }
+ fflush(fp);
+
free(symbols);
}
#else
void aws_backtrace_print(FILE *fp, void *call_site_data) {
- (void)call_site_data;
+ (void)call_site_data;
fprintf(fp, "No call stack information available\n");
}
-
-size_t aws_backtrace(void **stack_frames, size_t size) {
- (void)stack_frames;
- (void)size;
- return 0;
-}
-
-char **aws_backtrace_symbols(void *const *stack_frames, size_t stack_depth) {
- (void)stack_frames;
- (void)stack_depth;
- return NULL;
-}
-
-char **aws_backtrace_addr2line(void *const *stack_frames, size_t stack_depth) {
- (void)stack_frames;
- (void)stack_depth;
- return NULL;
-}
+
+size_t aws_backtrace(void **stack_frames, size_t size) {
+ (void)stack_frames;
+ (void)size;
+ return 0;
+}
+
+char **aws_backtrace_symbols(void *const *stack_frames, size_t stack_depth) {
+ (void)stack_frames;
+ (void)stack_depth;
+ return NULL;
+}
+
+char **aws_backtrace_addr2line(void *const *stack_frames, size_t stack_depth) {
+ (void)stack_frames;
+ (void)stack_depth;
+ return NULL;
+}
#endif /* AWS_HAVE_EXECINFO */
-
-void aws_backtrace_log() {
- void *stack_frames[1024];
- size_t num_frames = aws_backtrace(stack_frames, 1024);
- if (!num_frames) {
- return;
- }
- char **symbols = aws_backtrace_addr2line(stack_frames, num_frames);
- for (size_t line = 0; line < num_frames; ++line) {
- const char *symbol = symbols[line];
- AWS_LOGF_TRACE(AWS_LS_COMMON_GENERAL, "%s", symbol);
- }
- free(symbols);
-}
-
-#if defined(AWS_OS_APPLE)
-enum aws_platform_os aws_get_platform_build_os(void) {
- return AWS_PLATFORM_OS_MAC;
-}
-#else
-enum aws_platform_os aws_get_platform_build_os(void) {
- return AWS_PLATFORM_OS_UNIX;
-}
-#endif /* AWS_OS_APPLE */
+
+void aws_backtrace_log() {
+ void *stack_frames[1024];
+ size_t num_frames = aws_backtrace(stack_frames, 1024);
+ if (!num_frames) {
+ return;
+ }
+ char **symbols = aws_backtrace_addr2line(stack_frames, num_frames);
+ for (size_t line = 0; line < num_frames; ++line) {
+ const char *symbol = symbols[line];
+ AWS_LOGF_TRACE(AWS_LS_COMMON_GENERAL, "%s", symbol);
+ }
+ free(symbols);
+}
+
+#if defined(AWS_OS_APPLE)
+enum aws_platform_os aws_get_platform_build_os(void) {
+ return AWS_PLATFORM_OS_MAC;
+}
+#else
+enum aws_platform_os aws_get_platform_build_os(void) {
+ return AWS_PLATFORM_OS_UNIX;
+}
+#endif /* AWS_OS_APPLE */
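
A minimal sketch of driving the backtrace helpers above, assuming a build where execinfo is available (on the stub path aws_backtrace returns 0 and nothing is printed); dump_stack is an illustrative name, not part of the library:

#include <aws/common/system_info.h>
#include <stdio.h>
#include <stdlib.h>

static void dump_stack(void) {
    void *frames[64];
    size_t depth = aws_backtrace(frames, 64);
    if (depth == 0) {
        return; /* stub path: no execinfo support compiled in */
    }
    char **lines = aws_backtrace_symbols(frames, depth);
    if (!lines) {
        return;
    }
    for (size_t i = 0; i < depth; ++i) {
        fprintf(stderr, "%s\n", lines[i]);
    }
    free(lines); /* the caller owns the returned symbol block */
}
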
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/thread.c b/contrib/restricted/aws/aws-c-common/source/posix/thread.c
index a2d4834b54..064d16882f 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/thread.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/thread.c
@@ -1,94 +1,94 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
-#if !defined(__MACH__)
-# define _GNU_SOURCE
-#endif
+#if !defined(__MACH__)
+# define _GNU_SOURCE
+#endif
#include <aws/common/clock.h>
-#include <aws/common/logging.h>
-#include <aws/common/private/dlloads.h>
-#include <aws/common/thread.h>
+#include <aws/common/logging.h>
+#include <aws/common/private/dlloads.h>
+#include <aws/common/thread.h>
-#include <dlfcn.h>
+#include <dlfcn.h>
#include <errno.h>
-#include <inttypes.h>
+#include <inttypes.h>
#include <limits.h>
-#include <sched.h>
+#include <sched.h>
#include <time.h>
-#include <unistd.h>
+#include <unistd.h>
+
+#if defined(__FreeBSD__) || defined(__NETBSD__)
+# include <pthread_np.h>
+typedef cpuset_t cpu_set_t;
+#endif
-#if defined(__FreeBSD__) || defined(__NETBSD__)
-# include <pthread_np.h>
-typedef cpuset_t cpu_set_t;
-#endif
-
static struct aws_thread_options s_default_options = {
/* this will make sure platform default stack size is used. */
- .stack_size = 0,
- .cpu_id = -1,
-};
-
-struct thread_atexit_callback {
- aws_thread_atexit_fn *callback;
- void *user_data;
- struct thread_atexit_callback *next;
-};
-
+ .stack_size = 0,
+ .cpu_id = -1,
+};
+
+struct thread_atexit_callback {
+ aws_thread_atexit_fn *callback;
+ void *user_data;
+ struct thread_atexit_callback *next;
+};
+
struct thread_wrapper {
struct aws_allocator *allocator;
void (*func)(void *arg);
void *arg;
- struct thread_atexit_callback *atexit;
- void (*call_once)(void *);
- void *once_arg;
- struct aws_thread *thread;
- bool membind;
+ struct thread_atexit_callback *atexit;
+ void (*call_once)(void *);
+ void *once_arg;
+ struct aws_thread *thread;
+ bool membind;
};
-static AWS_THREAD_LOCAL struct thread_wrapper *tl_wrapper = NULL;
-
+static AWS_THREAD_LOCAL struct thread_wrapper *tl_wrapper = NULL;
+
static void *thread_fn(void *arg) {
struct thread_wrapper wrapper = *(struct thread_wrapper *)arg;
- struct aws_allocator *allocator = wrapper.allocator;
- tl_wrapper = &wrapper;
- if (wrapper.membind && g_set_mempolicy_ptr) {
- AWS_LOGF_INFO(
- AWS_LS_COMMON_THREAD,
- "id=%p: a cpu affinity was specified when launching this thread and set_mempolicy() is available on this "
- "system. Setting the memory policy to MPOL_PREFERRED",
- (void *)tl_wrapper->thread);
- /* if a user set a cpu id in their thread options, we're going to make sure the numa policy honors that
- * and makes sure the numa node of the cpu we launched this thread on is where memory gets allocated. However,
- * we don't want to fail the application if this fails, so make the call, and ignore the result. */
- long resp = g_set_mempolicy_ptr(AWS_MPOL_PREFERRED_ALIAS, NULL, 0);
- if (resp) {
- AWS_LOGF_WARN(
- AWS_LS_COMMON_THREAD,
- "id=%p: call to set_mempolicy() failed with errno %d",
- (void *)wrapper.thread,
- errno);
- }
- }
- wrapper.func(wrapper.arg);
-
- struct thread_atexit_callback *exit_callback_data = wrapper.atexit;
- aws_mem_release(allocator, arg);
-
- while (exit_callback_data) {
- aws_thread_atexit_fn *exit_callback = exit_callback_data->callback;
- void *exit_callback_user_data = exit_callback_data->user_data;
- struct thread_atexit_callback *next_exit_callback_data = exit_callback_data->next;
-
- aws_mem_release(allocator, exit_callback_data);
-
- exit_callback(exit_callback_user_data);
- exit_callback_data = next_exit_callback_data;
- }
- tl_wrapper = NULL;
-
+ struct aws_allocator *allocator = wrapper.allocator;
+ tl_wrapper = &wrapper;
+ if (wrapper.membind && g_set_mempolicy_ptr) {
+ AWS_LOGF_INFO(
+ AWS_LS_COMMON_THREAD,
+ "id=%p: a cpu affinity was specified when launching this thread and set_mempolicy() is available on this "
+ "system. Setting the memory policy to MPOL_PREFERRED",
+ (void *)tl_wrapper->thread);
+ /* if a user set a cpu id in their thread options, we're going to make sure the numa policy honors that
+ * and makes sure the numa node of the cpu we launched this thread on is where memory gets allocated. However,
+ * we don't want to fail the application if this fails, so make the call, and ignore the result. */
+ long resp = g_set_mempolicy_ptr(AWS_MPOL_PREFERRED_ALIAS, NULL, 0);
+ if (resp) {
+ AWS_LOGF_WARN(
+ AWS_LS_COMMON_THREAD,
+ "id=%p: call to set_mempolicy() failed with errno %d",
+ (void *)wrapper.thread,
+ errno);
+ }
+ }
+ wrapper.func(wrapper.arg);
+
+ struct thread_atexit_callback *exit_callback_data = wrapper.atexit;
+ aws_mem_release(allocator, arg);
+
+ while (exit_callback_data) {
+ aws_thread_atexit_fn *exit_callback = exit_callback_data->callback;
+ void *exit_callback_user_data = exit_callback_data->user_data;
+ struct thread_atexit_callback *next_exit_callback_data = exit_callback_data->next;
+
+ aws_mem_release(allocator, exit_callback_data);
+
+ exit_callback(exit_callback_user_data);
+ exit_callback_data = next_exit_callback_data;
+ }
+ tl_wrapper = NULL;
+
return NULL;
}
@@ -102,28 +102,28 @@ void aws_thread_clean_up(struct aws_thread *thread) {
}
}
-static void s_call_once(void) {
- tl_wrapper->call_once(tl_wrapper->once_arg);
+static void s_call_once(void) {
+ tl_wrapper->call_once(tl_wrapper->once_arg);
+}
+
+void aws_thread_call_once(aws_thread_once *flag, void (*call_once)(void *), void *user_data) {
+ // If this is a non-aws_thread, then gin up a temp thread wrapper
+ struct thread_wrapper temp_wrapper;
+ if (!tl_wrapper) {
+ tl_wrapper = &temp_wrapper;
+ }
+
+ tl_wrapper->call_once = call_once;
+ tl_wrapper->once_arg = user_data;
+ pthread_once(flag, s_call_once);
+
+ if (tl_wrapper == &temp_wrapper) {
+ tl_wrapper = NULL;
+ }
}
-void aws_thread_call_once(aws_thread_once *flag, void (*call_once)(void *), void *user_data) {
- // If this is a non-aws_thread, then gin up a temp thread wrapper
- struct thread_wrapper temp_wrapper;
- if (!tl_wrapper) {
- tl_wrapper = &temp_wrapper;
- }
-
- tl_wrapper->call_once = call_once;
- tl_wrapper->once_arg = user_data;
- pthread_once(flag, s_call_once);
-
- if (tl_wrapper == &temp_wrapper) {
- tl_wrapper = NULL;
- }
-}
-
int aws_thread_init(struct aws_thread *thread, struct aws_allocator *allocator) {
- *thread = (struct aws_thread){.allocator = allocator, .detach_state = AWS_THREAD_NOT_CREATED};
+ *thread = (struct aws_thread){.allocator = allocator, .detach_state = AWS_THREAD_NOT_CREATED};
return AWS_OP_SUCCESS;
}
@@ -155,50 +155,50 @@ int aws_thread_launch(
goto cleanup;
}
}
-
-/* AFAIK you can't set thread affinity on apple platforms, and it doesn't really matter since all memory
- * NUMA or not is set up in interleave mode.
- * Thread affinity is also not supported on Android systems, and honestly, if you're running Android on a NUMA
- * configuration, you've got bigger problems. */
-#if !defined(__MACH__) && !defined(__ANDROID__) && !defined(_musl_)
- if (options->cpu_id >= 0) {
- AWS_LOGF_INFO(
- AWS_LS_COMMON_THREAD,
- "id=%p: cpu affinity of cpu_id %d was specified, attempting to honor the value.",
- (void *)thread,
- options->cpu_id);
-
- cpu_set_t cpuset;
- CPU_ZERO(&cpuset);
- CPU_SET((uint32_t)options->cpu_id, &cpuset);
-
- attr_return = pthread_attr_setaffinity_np(attributes_ptr, sizeof(cpuset), &cpuset);
-
- if (attr_return) {
- AWS_LOGF_ERROR(
- AWS_LS_COMMON_THREAD,
- "id=%p: pthread_attr_setaffinity_np() failed with %d.",
- (void *)thread,
- errno);
- goto cleanup;
- }
- }
-#endif /* !defined(__MACH__) && !defined(__ANDROID__) */
+
+/* AFAIK you can't set thread affinity on apple platforms, and it doesn't really matter since all memory
+ * NUMA or not is set up in interleave mode.
+ * Thread affinity is also not supported on Android systems, and honestly, if you're running Android on a NUMA
+ * configuration, you've got bigger problems. */
+#if !defined(__MACH__) && !defined(__ANDROID__) && !defined(_musl_)
+ if (options->cpu_id >= 0) {
+ AWS_LOGF_INFO(
+ AWS_LS_COMMON_THREAD,
+ "id=%p: cpu affinity of cpu_id %d was specified, attempting to honor the value.",
+ (void *)thread,
+ options->cpu_id);
+
+ cpu_set_t cpuset;
+ CPU_ZERO(&cpuset);
+ CPU_SET((uint32_t)options->cpu_id, &cpuset);
+
+ attr_return = pthread_attr_setaffinity_np(attributes_ptr, sizeof(cpuset), &cpuset);
+
+ if (attr_return) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_THREAD,
+ "id=%p: pthread_attr_setaffinity_np() failed with %d.",
+ (void *)thread,
+ errno);
+ goto cleanup;
+ }
+ }
+#endif /* !defined(__MACH__) && !defined(__ANDROID__) */
}
struct thread_wrapper *wrapper =
- (struct thread_wrapper *)aws_mem_calloc(thread->allocator, 1, sizeof(struct thread_wrapper));
+ (struct thread_wrapper *)aws_mem_calloc(thread->allocator, 1, sizeof(struct thread_wrapper));
if (!wrapper) {
allocation_failed = 1;
goto cleanup;
}
- if (options && options->cpu_id >= 0) {
- wrapper->membind = true;
- }
-
- wrapper->thread = thread;
+ if (options && options->cpu_id >= 0) {
+ wrapper->membind = true;
+ }
+
+ wrapper->thread = thread;
wrapper->allocator = thread->allocator;
wrapper->func = func;
wrapper->arg = arg;
@@ -234,8 +234,8 @@ cleanup:
return AWS_OP_SUCCESS;
}
-aws_thread_id_t aws_thread_get_id(struct aws_thread *thread) {
- return thread->thread_id;
+aws_thread_id_t aws_thread_get_id(struct aws_thread *thread) {
+ return thread->thread_id;
}
enum aws_thread_detach_state aws_thread_get_detach_state(struct aws_thread *thread) {
@@ -264,14 +264,14 @@ int aws_thread_join(struct aws_thread *thread) {
return AWS_OP_SUCCESS;
}
-aws_thread_id_t aws_thread_current_thread_id(void) {
- return pthread_self();
+aws_thread_id_t aws_thread_current_thread_id(void) {
+ return pthread_self();
+}
+
+bool aws_thread_thread_id_equal(aws_thread_id_t t1, aws_thread_id_t t2) {
+ return pthread_equal(t1, t2) != 0;
}
-bool aws_thread_thread_id_equal(aws_thread_id_t t1, aws_thread_id_t t2) {
- return pthread_equal(t1, t2) != 0;
-}
-
void aws_thread_current_sleep(uint64_t nanos) {
uint64_t nano = 0;
time_t seconds = (time_t)aws_timestamp_convert(nanos, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &nano);
@@ -284,19 +284,19 @@ void aws_thread_current_sleep(uint64_t nanos) {
nanosleep(&tm, &output);
}
-
-int aws_thread_current_at_exit(aws_thread_atexit_fn *callback, void *user_data) {
- if (!tl_wrapper) {
- return aws_raise_error(AWS_ERROR_THREAD_NOT_JOINABLE);
- }
-
- struct thread_atexit_callback *cb = aws_mem_calloc(tl_wrapper->allocator, 1, sizeof(struct thread_atexit_callback));
- if (!cb) {
- return AWS_OP_ERR;
- }
- cb->callback = callback;
- cb->user_data = user_data;
- cb->next = tl_wrapper->atexit;
- tl_wrapper->atexit = cb;
- return AWS_OP_SUCCESS;
-}
+
+int aws_thread_current_at_exit(aws_thread_atexit_fn *callback, void *user_data) {
+ if (!tl_wrapper) {
+ return aws_raise_error(AWS_ERROR_THREAD_NOT_JOINABLE);
+ }
+
+ struct thread_atexit_callback *cb = aws_mem_calloc(tl_wrapper->allocator, 1, sizeof(struct thread_atexit_callback));
+ if (!cb) {
+ return AWS_OP_ERR;
+ }
+ cb->callback = callback;
+ cb->user_data = user_data;
+ cb->next = tl_wrapper->atexit;
+ tl_wrapper->atexit = cb;
+ return AWS_OP_SUCCESS;
+}
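
A brief sketch of the call-once and at-exit hooks above. It assumes the AWS_THREAD_ONCE_STATIC_INIT initializer from aws/common/thread.h; worker_body is illustrative. aws_thread_current_at_exit only succeeds on threads launched through aws_thread_launch, otherwise AWS_ERROR_THREAD_NOT_JOINABLE is raised:

#include <aws/common/thread.h>
#include <stdio.h>

static aws_thread_once s_init_flag = AWS_THREAD_ONCE_STATIC_INIT; /* assumed initializer macro */

static void s_init_once(void *user_data) {
    (void)user_data;
    printf("runs exactly once per process\n");
}

static void s_on_thread_exit(void *user_data) {
    (void)user_data;
    printf("runs as the launching aws_thread unwinds\n");
}

static void worker_body(void *arg) {
    (void)arg;
    aws_thread_call_once(&s_init_flag, s_init_once, NULL);
    if (aws_thread_current_at_exit(s_on_thread_exit, NULL) != AWS_OP_SUCCESS) {
        /* not running on a thread created via aws_thread_launch */
    }
}
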
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/time.c b/contrib/restricted/aws/aws-c-common/source/posix/time.c
index 6ce3bf2e2b..dd49d6b0b6 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/time.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/time.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/time.h>
diff --git a/contrib/restricted/aws/aws-c-common/source/priority_queue.c b/contrib/restricted/aws/aws-c-common/source/priority_queue.c
index eba803664a..14ff421d5f 100644
--- a/contrib/restricted/aws/aws-c-common/source/priority_queue.c
+++ b/contrib/restricted/aws/aws-c-common/source/priority_queue.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/priority_queue.h>
@@ -12,16 +12,16 @@
#define RIGHT_OF(index) (((index) << 1) + 2)
static void s_swap(struct aws_priority_queue *queue, size_t a, size_t b) {
- AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
- AWS_PRECONDITION(a < queue->container.length);
- AWS_PRECONDITION(b < queue->container.length);
- AWS_PRECONDITION(aws_priority_queue_backpointer_index_valid(queue, a));
- AWS_PRECONDITION(aws_priority_queue_backpointer_index_valid(queue, b));
-
+ AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
+ AWS_PRECONDITION(a < queue->container.length);
+ AWS_PRECONDITION(b < queue->container.length);
+ AWS_PRECONDITION(aws_priority_queue_backpointer_index_valid(queue, a));
+ AWS_PRECONDITION(aws_priority_queue_backpointer_index_valid(queue, b));
+
aws_array_list_swap(&queue->container, a, b);
/* Invariant: If the backpointer array is initialized, we have enough room for all elements */
- if (!AWS_IS_ZEROED(queue->backpointers)) {
+ if (!AWS_IS_ZEROED(queue->backpointers)) {
AWS_ASSERT(queue->backpointers.length > a);
AWS_ASSERT(queue->backpointers.length > b);
@@ -40,17 +40,17 @@ static void s_swap(struct aws_priority_queue *queue, size_t a, size_t b) {
(*bp_b)->current_index = b;
}
}
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
- AWS_POSTCONDITION(aws_priority_queue_backpointer_index_valid(queue, a));
- AWS_POSTCONDITION(aws_priority_queue_backpointer_index_valid(queue, b));
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+ AWS_POSTCONDITION(aws_priority_queue_backpointer_index_valid(queue, a));
+ AWS_POSTCONDITION(aws_priority_queue_backpointer_index_valid(queue, b));
}
/* Precondition: with the exception of the given root element, the container must be
* in heap order */
static bool s_sift_down(struct aws_priority_queue *queue, size_t root) {
- AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
- AWS_PRECONDITION(root < queue->container.length);
-
+ AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
+ AWS_PRECONDITION(root < queue->container.length);
+
bool did_move = false;
size_t len = aws_array_list_length(&queue->container);
@@ -89,15 +89,15 @@ static bool s_sift_down(struct aws_priority_queue *queue, size_t root) {
}
}
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
return did_move;
}
/* Precondition: Elements prior to the specified index must be in heap order. */
static bool s_sift_up(struct aws_priority_queue *queue, size_t index) {
- AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
- AWS_PRECONDITION(index < queue->container.length);
-
+ AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
+ AWS_PRECONDITION(index < queue->container.length);
+
bool did_move = false;
void *parent_item, *child_item;
@@ -123,7 +123,7 @@ static bool s_sift_up(struct aws_priority_queue *queue, size_t index) {
}
}
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
return did_move;
}
@@ -132,14 +132,14 @@ static bool s_sift_up(struct aws_priority_queue *queue, size_t index) {
* In particular, the parent of the current index is a predecessor of all children of the current index.
*/
static void s_sift_either(struct aws_priority_queue *queue, size_t index) {
- AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
- AWS_PRECONDITION(index < queue->container.length);
-
+ AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
+ AWS_PRECONDITION(index < queue->container.length);
+
if (!index || !s_sift_up(queue, index)) {
s_sift_down(queue, index);
}
-
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
}
int aws_priority_queue_init_dynamic(
@@ -149,21 +149,21 @@ int aws_priority_queue_init_dynamic(
size_t item_size,
aws_priority_queue_compare_fn *pred) {
- AWS_FATAL_PRECONDITION(queue != NULL);
- AWS_FATAL_PRECONDITION(alloc != NULL);
- AWS_FATAL_PRECONDITION(item_size > 0);
-
+ AWS_FATAL_PRECONDITION(queue != NULL);
+ AWS_FATAL_PRECONDITION(alloc != NULL);
+ AWS_FATAL_PRECONDITION(item_size > 0);
+
queue->pred = pred;
AWS_ZERO_STRUCT(queue->backpointers);
- int ret = aws_array_list_init_dynamic(&queue->container, alloc, default_size, item_size);
- if (ret == AWS_OP_SUCCESS) {
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
- } else {
- AWS_POSTCONDITION(AWS_IS_ZEROED(queue->container));
- AWS_POSTCONDITION(AWS_IS_ZEROED(queue->backpointers));
- }
- return ret;
+ int ret = aws_array_list_init_dynamic(&queue->container, alloc, default_size, item_size);
+ if (ret == AWS_OP_SUCCESS) {
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+ } else {
+ AWS_POSTCONDITION(AWS_IS_ZEROED(queue->container));
+ AWS_POSTCONDITION(AWS_IS_ZEROED(queue->backpointers));
+ }
+ return ret;
}
void aws_priority_queue_init_static(
@@ -173,113 +173,113 @@ void aws_priority_queue_init_static(
size_t item_size,
aws_priority_queue_compare_fn *pred) {
- AWS_FATAL_PRECONDITION(queue != NULL);
- AWS_FATAL_PRECONDITION(heap != NULL);
- AWS_FATAL_PRECONDITION(item_count > 0);
- AWS_FATAL_PRECONDITION(item_size > 0);
-
+ AWS_FATAL_PRECONDITION(queue != NULL);
+ AWS_FATAL_PRECONDITION(heap != NULL);
+ AWS_FATAL_PRECONDITION(item_count > 0);
+ AWS_FATAL_PRECONDITION(item_size > 0);
+
queue->pred = pred;
AWS_ZERO_STRUCT(queue->backpointers);
aws_array_list_init_static(&queue->container, heap, item_count, item_size);
-
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+}
+
+bool aws_priority_queue_backpointer_index_valid(const struct aws_priority_queue *const queue, size_t index) {
+ if (AWS_IS_ZEROED(queue->backpointers)) {
+ return true;
+ }
+ if (index < queue->backpointers.length) {
+ struct aws_priority_queue_node *node = ((struct aws_priority_queue_node **)queue->backpointers.data)[index];
+ return (node == NULL) || AWS_MEM_IS_WRITABLE(node, sizeof(struct aws_priority_queue_node));
+ }
+ return false;
+}
+
+bool aws_priority_queue_backpointers_valid_deep(const struct aws_priority_queue *const queue) {
+ if (!queue) {
+ return false;
+ }
+ for (size_t i = 0; i < queue->backpointers.length; i++) {
+ if (!aws_priority_queue_backpointer_index_valid(queue, i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool aws_priority_queue_backpointers_valid(const struct aws_priority_queue *const queue) {
+ if (!queue) {
+ return false;
+ }
+
+ /* Internal container validity */
+ bool backpointer_list_is_valid =
+ ((aws_array_list_is_valid(&queue->backpointers) && (queue->backpointers.current_size != 0) &&
+ (queue->backpointers.data != NULL)));
+
+ /* Backpointer struct should either be zero or should be
+ * initialized to be at most as long as the container, and having
+ * as elements potentially null pointers to
+ * aws_priority_queue_nodes */
+ bool backpointer_list_item_size = queue->backpointers.item_size == sizeof(struct aws_priority_queue_node *);
+ bool lists_equal_lengths = queue->backpointers.length == queue->container.length;
+ bool backpointers_non_zero_current_size = queue->backpointers.current_size > 0;
+
+ /* This check must be guarded, as it is not efficient, neither
+ * when running tests nor CBMC */
+#if (AWS_DEEP_CHECKS == 1)
+ bool backpointers_valid_deep = aws_priority_queue_backpointers_valid_deep(queue);
+#else
+ bool backpointers_valid_deep = true;
+#endif
+ bool backpointers_zero =
+ (queue->backpointers.current_size == 0 && queue->backpointers.length == 0 && queue->backpointers.data == NULL);
+ bool backpointer_struct_is_valid =
+ backpointers_zero || (backpointer_list_item_size && lists_equal_lengths && backpointers_non_zero_current_size &&
+ backpointers_valid_deep);
+
+ return ((backpointer_list_is_valid && backpointer_struct_is_valid) || AWS_IS_ZEROED(queue->backpointers));
}
-bool aws_priority_queue_backpointer_index_valid(const struct aws_priority_queue *const queue, size_t index) {
- if (AWS_IS_ZEROED(queue->backpointers)) {
- return true;
- }
- if (index < queue->backpointers.length) {
- struct aws_priority_queue_node *node = ((struct aws_priority_queue_node **)queue->backpointers.data)[index];
- return (node == NULL) || AWS_MEM_IS_WRITABLE(node, sizeof(struct aws_priority_queue_node));
- }
- return false;
-}
-
-bool aws_priority_queue_backpointers_valid_deep(const struct aws_priority_queue *const queue) {
- if (!queue) {
- return false;
- }
- for (size_t i = 0; i < queue->backpointers.length; i++) {
- if (!aws_priority_queue_backpointer_index_valid(queue, i)) {
- return false;
- }
- }
- return true;
-}
-
-bool aws_priority_queue_backpointers_valid(const struct aws_priority_queue *const queue) {
- if (!queue) {
- return false;
- }
-
- /* Internal container validity */
- bool backpointer_list_is_valid =
- ((aws_array_list_is_valid(&queue->backpointers) && (queue->backpointers.current_size != 0) &&
- (queue->backpointers.data != NULL)));
-
- /* Backpointer struct should either be zero or should be
- * initialized to be at most as long as the container, and having
- * as elements potentially null pointers to
- * aws_priority_queue_nodes */
- bool backpointer_list_item_size = queue->backpointers.item_size == sizeof(struct aws_priority_queue_node *);
- bool lists_equal_lengths = queue->backpointers.length == queue->container.length;
- bool backpointers_non_zero_current_size = queue->backpointers.current_size > 0;
-
- /* This check must be guarded, as it is not efficient, neither
- * when running tests nor CBMC */
-#if (AWS_DEEP_CHECKS == 1)
- bool backpointers_valid_deep = aws_priority_queue_backpointers_valid_deep(queue);
-#else
- bool backpointers_valid_deep = true;
-#endif
- bool backpointers_zero =
- (queue->backpointers.current_size == 0 && queue->backpointers.length == 0 && queue->backpointers.data == NULL);
- bool backpointer_struct_is_valid =
- backpointers_zero || (backpointer_list_item_size && lists_equal_lengths && backpointers_non_zero_current_size &&
- backpointers_valid_deep);
-
- return ((backpointer_list_is_valid && backpointer_struct_is_valid) || AWS_IS_ZEROED(queue->backpointers));
-}
-
bool aws_priority_queue_is_valid(const struct aws_priority_queue *const queue) {
- /* Pointer validity checks */
+ /* Pointer validity checks */
if (!queue) {
return false;
}
bool pred_is_valid = (queue->pred != NULL);
bool container_is_valid = aws_array_list_is_valid(&queue->container);
-
- bool backpointers_valid = aws_priority_queue_backpointers_valid(queue);
- return pred_is_valid && container_is_valid && backpointers_valid;
+
+ bool backpointers_valid = aws_priority_queue_backpointers_valid(queue);
+ return pred_is_valid && container_is_valid && backpointers_valid;
}
void aws_priority_queue_clean_up(struct aws_priority_queue *queue) {
aws_array_list_clean_up(&queue->container);
- if (!AWS_IS_ZEROED(queue->backpointers)) {
- aws_array_list_clean_up(&queue->backpointers);
- }
+ if (!AWS_IS_ZEROED(queue->backpointers)) {
+ aws_array_list_clean_up(&queue->backpointers);
+ }
}
int aws_priority_queue_push(struct aws_priority_queue *queue, void *item) {
- AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
- AWS_PRECONDITION(item && AWS_MEM_IS_READABLE(item, queue->container.item_size));
- int rval = aws_priority_queue_push_ref(queue, item, NULL);
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
- return rval;
+ AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
+ AWS_PRECONDITION(item && AWS_MEM_IS_READABLE(item, queue->container.item_size));
+ int rval = aws_priority_queue_push_ref(queue, item, NULL);
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+ return rval;
}
int aws_priority_queue_push_ref(
struct aws_priority_queue *queue,
void *item,
struct aws_priority_queue_node *backpointer) {
- AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
- AWS_PRECONDITION(item && AWS_MEM_IS_READABLE(item, queue->container.item_size));
-
+ AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
+ AWS_PRECONDITION(item && AWS_MEM_IS_READABLE(item, queue->container.item_size));
+
int err = aws_array_list_push_back(&queue->container, item);
if (err) {
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
return err;
}
size_t index = aws_array_list_length(&queue->container) - 1;
@@ -304,7 +304,7 @@ int aws_priority_queue_push_ref(
* for all elements; otherwise, sift_down gets complicated if it runs out of memory when sifting an
* element with a backpointer down in the array.
*/
- if (!AWS_IS_ZEROED(queue->backpointers)) {
+ if (!AWS_IS_ZEROED(queue->backpointers)) {
if (aws_array_list_set_at(&queue->backpointers, &backpointer, index)) {
goto backpointer_update_failed;
}
@@ -316,22 +316,22 @@ int aws_priority_queue_push_ref(
s_sift_up(queue, aws_array_list_length(&queue->container) - 1);
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
return AWS_OP_SUCCESS;
backpointer_update_failed:
/* Failed to initialize or grow the backpointer array, back out the node addition */
aws_array_list_pop_back(&queue->container);
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
return AWS_OP_ERR;
}
static int s_remove_node(struct aws_priority_queue *queue, void *item, size_t item_index) {
- AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
- AWS_PRECONDITION(item && AWS_MEM_IS_WRITABLE(item, queue->container.item_size));
+ AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
+ AWS_PRECONDITION(item && AWS_MEM_IS_WRITABLE(item, queue->container.item_size));
if (aws_array_list_get_at(&queue->container, item, item_index)) {
/* shouldn't happen, but if it does we've already raised an error... */
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
return AWS_OP_ERR;
}
@@ -342,21 +342,21 @@ static int s_remove_node(struct aws_priority_queue *queue, void *item, size_t it
s_swap(queue, item_index, swap_with);
}
- aws_array_list_pop_back(&queue->container);
-
- if (!AWS_IS_ZEROED(queue->backpointers)) {
- aws_array_list_get_at(&queue->backpointers, &backpointer, swap_with);
- if (backpointer) {
- backpointer->current_index = SIZE_MAX;
- }
- aws_array_list_pop_back(&queue->backpointers);
+ aws_array_list_pop_back(&queue->container);
+
+ if (!AWS_IS_ZEROED(queue->backpointers)) {
+ aws_array_list_get_at(&queue->backpointers, &backpointer, swap_with);
+ if (backpointer) {
+ backpointer->current_index = SIZE_MAX;
+ }
+ aws_array_list_pop_back(&queue->backpointers);
}
if (item_index != swap_with) {
s_sift_either(queue, item_index);
}
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
return AWS_OP_SUCCESS;
}
@@ -364,30 +364,30 @@ int aws_priority_queue_remove(
struct aws_priority_queue *queue,
void *item,
const struct aws_priority_queue_node *node) {
- AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
- AWS_PRECONDITION(item && AWS_MEM_IS_WRITABLE(item, queue->container.item_size));
- AWS_PRECONDITION(node && AWS_MEM_IS_READABLE(node, sizeof(struct aws_priority_queue_node)));
- AWS_ERROR_PRECONDITION(
- node->current_index < aws_array_list_length(&queue->container), AWS_ERROR_PRIORITY_QUEUE_BAD_NODE);
- AWS_ERROR_PRECONDITION(queue->backpointers.data, AWS_ERROR_PRIORITY_QUEUE_BAD_NODE);
-
- int rval = s_remove_node(queue, item, node->current_index);
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
- return rval;
+ AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
+ AWS_PRECONDITION(item && AWS_MEM_IS_WRITABLE(item, queue->container.item_size));
+ AWS_PRECONDITION(node && AWS_MEM_IS_READABLE(node, sizeof(struct aws_priority_queue_node)));
+ AWS_ERROR_PRECONDITION(
+ node->current_index < aws_array_list_length(&queue->container), AWS_ERROR_PRIORITY_QUEUE_BAD_NODE);
+ AWS_ERROR_PRECONDITION(queue->backpointers.data, AWS_ERROR_PRIORITY_QUEUE_BAD_NODE);
+
+ int rval = s_remove_node(queue, item, node->current_index);
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+ return rval;
}
int aws_priority_queue_pop(struct aws_priority_queue *queue, void *item) {
- AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
- AWS_PRECONDITION(item && AWS_MEM_IS_WRITABLE(item, queue->container.item_size));
- AWS_ERROR_PRECONDITION(aws_array_list_length(&queue->container) != 0, AWS_ERROR_PRIORITY_QUEUE_EMPTY);
+ AWS_PRECONDITION(aws_priority_queue_is_valid(queue));
+ AWS_PRECONDITION(item && AWS_MEM_IS_WRITABLE(item, queue->container.item_size));
+ AWS_ERROR_PRECONDITION(aws_array_list_length(&queue->container) != 0, AWS_ERROR_PRIORITY_QUEUE_EMPTY);
- int rval = s_remove_node(queue, item, 0);
- AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
- return rval;
+ int rval = s_remove_node(queue, item, 0);
+ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue));
+ return rval;
}
int aws_priority_queue_top(const struct aws_priority_queue *queue, void **item) {
- AWS_ERROR_PRECONDITION(aws_array_list_length(&queue->container) != 0, AWS_ERROR_PRIORITY_QUEUE_EMPTY);
+ AWS_ERROR_PRECONDITION(aws_array_list_length(&queue->container) != 0, AWS_ERROR_PRIORITY_QUEUE_EMPTY);
return aws_array_list_get_at_ptr(&queue->container, item, 0);
}
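
A minimal usage sketch for the heap operations above, assuming a qsort-style comparator (negative when the first argument should pop first); s_compare_ints and demo_min_heap are illustrative names:

#include <aws/common/priority_queue.h>

static int s_compare_ints(const void *a, const void *b) {
    int lhs = *(const int *)a;
    int rhs = *(const int *)b;
    return (lhs > rhs) - (lhs < rhs);
}

static int demo_min_heap(struct aws_allocator *alloc) {
    struct aws_priority_queue queue;
    if (aws_priority_queue_init_dynamic(&queue, alloc, 8, sizeof(int), s_compare_ints)) {
        return AWS_OP_ERR;
    }
    int values[] = {5, 1, 3};
    for (size_t i = 0; i < 3; ++i) {
        aws_priority_queue_push(&queue, &values[i]);
    }
    int top = 0;
    aws_priority_queue_pop(&queue, &top); /* copies out the smallest element, 1 */
    aws_priority_queue_clean_up(&queue);
    return AWS_OP_SUCCESS;
}
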
diff --git a/contrib/restricted/aws/aws-c-common/source/process_common.c b/contrib/restricted/aws/aws-c-common/source/process_common.c
index e2d266edba..9b734c46f8 100644
--- a/contrib/restricted/aws/aws-c-common/source/process_common.c
+++ b/contrib/restricted/aws/aws-c-common/source/process_common.c
@@ -1,82 +1,82 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/process.h>
-#include <aws/common/string.h>
-
-#include <stdio.h>
-#include <sys/types.h>
-
-#define MAX_BUFFER_SIZE (2048)
-
-int aws_run_command_result_init(struct aws_allocator *allocator, struct aws_run_command_result *result) {
- if (!allocator || !result) {
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
- AWS_ZERO_STRUCT(*result);
- return AWS_OP_SUCCESS;
-}
-
-void aws_run_command_result_cleanup(struct aws_run_command_result *result) {
- if (!result) {
- return;
- }
- aws_string_destroy_secure(result->std_out);
- aws_string_destroy_secure(result->std_err);
-}
-
-int aws_run_command(
- struct aws_allocator *allocator,
- struct aws_run_command_options *options,
- struct aws_run_command_result *result) {
-
- AWS_FATAL_ASSERT(allocator);
- AWS_FATAL_ASSERT(options);
- AWS_FATAL_ASSERT(result);
-
- FILE *output_stream;
- char output_buffer[MAX_BUFFER_SIZE];
- struct aws_byte_buf result_buffer;
- int ret = AWS_OP_ERR;
- if (aws_byte_buf_init(&result_buffer, allocator, MAX_BUFFER_SIZE)) {
- goto on_finish;
- }
-
-#ifdef _WIN32
- output_stream = _popen(options->command, "r");
-#else
- output_stream = popen(options->command, "r");
-#endif
-
- if (output_stream) {
- while (!feof(output_stream)) {
- if (fgets(output_buffer, MAX_BUFFER_SIZE, output_stream) != NULL) {
- struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str(output_buffer);
- if (aws_byte_buf_append_dynamic(&result_buffer, &cursor)) {
- goto on_finish;
- }
- }
- }
-#ifdef _WIN32
- result->ret_code = _pclose(output_stream);
-#else
- result->ret_code = pclose(output_stream);
-#endif
- }
-
- struct aws_byte_cursor trim_cursor = aws_byte_cursor_from_buf(&result_buffer);
- struct aws_byte_cursor trimmed_cursor = aws_byte_cursor_trim_pred(&trim_cursor, aws_char_is_space);
- if (trimmed_cursor.len) {
- result->std_out = aws_string_new_from_array(allocator, trimmed_cursor.ptr, trimmed_cursor.len);
- if (!result->std_out) {
- goto on_finish;
- }
- }
- ret = AWS_OP_SUCCESS;
-
-on_finish:
- aws_byte_buf_clean_up_secure(&result_buffer);
- return ret;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/process.h>
+#include <aws/common/string.h>
+
+#include <stdio.h>
+#include <sys/types.h>
+
+#define MAX_BUFFER_SIZE (2048)
+
+int aws_run_command_result_init(struct aws_allocator *allocator, struct aws_run_command_result *result) {
+ if (!allocator || !result) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ AWS_ZERO_STRUCT(*result);
+ return AWS_OP_SUCCESS;
+}
+
+void aws_run_command_result_cleanup(struct aws_run_command_result *result) {
+ if (!result) {
+ return;
+ }
+ aws_string_destroy_secure(result->std_out);
+ aws_string_destroy_secure(result->std_err);
+}
+
+int aws_run_command(
+ struct aws_allocator *allocator,
+ struct aws_run_command_options *options,
+ struct aws_run_command_result *result) {
+
+ AWS_FATAL_ASSERT(allocator);
+ AWS_FATAL_ASSERT(options);
+ AWS_FATAL_ASSERT(result);
+
+ FILE *output_stream;
+ char output_buffer[MAX_BUFFER_SIZE];
+ struct aws_byte_buf result_buffer;
+ int ret = AWS_OP_ERR;
+ if (aws_byte_buf_init(&result_buffer, allocator, MAX_BUFFER_SIZE)) {
+ goto on_finish;
+ }
+
+#ifdef _WIN32
+ output_stream = _popen(options->command, "r");
+#else
+ output_stream = popen(options->command, "r");
+#endif
+
+ if (output_stream) {
+ while (!feof(output_stream)) {
+ if (fgets(output_buffer, MAX_BUFFER_SIZE, output_stream) != NULL) {
+ struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str(output_buffer);
+ if (aws_byte_buf_append_dynamic(&result_buffer, &cursor)) {
+ goto on_finish;
+ }
+ }
+ }
+#ifdef _WIN32
+ result->ret_code = _pclose(output_stream);
+#else
+ result->ret_code = pclose(output_stream);
+#endif
+ }
+
+ struct aws_byte_cursor trim_cursor = aws_byte_cursor_from_buf(&result_buffer);
+ struct aws_byte_cursor trimmed_cursor = aws_byte_cursor_trim_pred(&trim_cursor, aws_char_is_space);
+ if (trimmed_cursor.len) {
+ result->std_out = aws_string_new_from_array(allocator, trimmed_cursor.ptr, trimmed_cursor.len);
+ if (!result->std_out) {
+ goto on_finish;
+ }
+ }
+ ret = AWS_OP_SUCCESS;
+
+on_finish:
+ aws_byte_buf_clean_up_secure(&result_buffer);
+ return ret;
+}
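
A short sketch of aws_run_command as restored above, assuming aws_string_c_str from aws/common/string.h for printing; run_uname is an illustrative wrapper:

#include <aws/common/process.h>
#include <aws/common/string.h>
#include <stdio.h>

static int run_uname(struct aws_allocator *alloc) {
    struct aws_run_command_result result;
    if (aws_run_command_result_init(alloc, &result)) {
        return AWS_OP_ERR;
    }
    struct aws_run_command_options options = {.command = "uname -a"};
    int rc = aws_run_command(alloc, &options, &result);
    if (rc == AWS_OP_SUCCESS && result.std_out) {
        printf("exit code %d: %s\n", result.ret_code, aws_string_c_str(result.std_out));
    }
    aws_run_command_result_cleanup(&result);
    return rc;
}
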
diff --git a/contrib/restricted/aws/aws-c-common/source/ref_count.c b/contrib/restricted/aws/aws-c-common/source/ref_count.c
index 30658cd22a..a1d938b022 100644
--- a/contrib/restricted/aws/aws-c-common/source/ref_count.c
+++ b/contrib/restricted/aws/aws-c-common/source/ref_count.c
@@ -1,80 +1,80 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-#include <aws/common/ref_count.h>
-
-#include <aws/common/clock.h>
-#include <aws/common/condition_variable.h>
-#include <aws/common/mutex.h>
-
-void aws_ref_count_init(struct aws_ref_count *ref_count, void *object, aws_simple_completion_callback *on_zero_fn) {
- aws_atomic_init_int(&ref_count->ref_count, 1);
- ref_count->object = object;
- ref_count->on_zero_fn = on_zero_fn;
-}
-
-void *aws_ref_count_acquire(struct aws_ref_count *ref_count) {
- aws_atomic_fetch_add(&ref_count->ref_count, 1);
-
- return ref_count->object;
-}
-
-size_t aws_ref_count_release(struct aws_ref_count *ref_count) {
- size_t old_value = aws_atomic_fetch_sub(&ref_count->ref_count, 1);
- AWS_ASSERT(old_value > 0 && "refcount has gone negative");
- if (old_value == 1) {
- ref_count->on_zero_fn(ref_count->object);
- }
-
- return old_value - 1;
-}
-
-static struct aws_condition_variable s_global_thread_signal = AWS_CONDITION_VARIABLE_INIT;
-static struct aws_mutex s_global_thread_lock = AWS_MUTEX_INIT;
-static uint32_t s_global_thread_count = 0;
-
-void aws_global_thread_creator_increment(void) {
- aws_mutex_lock(&s_global_thread_lock);
- ++s_global_thread_count;
- aws_mutex_unlock(&s_global_thread_lock);
-}
-
-void aws_global_thread_creator_decrement(void) {
- bool signal = false;
- aws_mutex_lock(&s_global_thread_lock);
- AWS_ASSERT(s_global_thread_count != 0 && "global tracker has gone negative");
- --s_global_thread_count;
- if (s_global_thread_count == 0) {
- signal = true;
- }
- aws_mutex_unlock(&s_global_thread_lock);
-
- if (signal) {
- aws_condition_variable_notify_all(&s_global_thread_signal);
- }
-}
-
-static bool s_thread_count_zero_pred(void *user_data) {
- (void)user_data;
-
- return s_global_thread_count == 0;
-}
-
-void aws_global_thread_creator_shutdown_wait(void) {
- aws_mutex_lock(&s_global_thread_lock);
- aws_condition_variable_wait_pred(&s_global_thread_signal, &s_global_thread_lock, s_thread_count_zero_pred, NULL);
- aws_mutex_unlock(&s_global_thread_lock);
-}
-
-int aws_global_thread_creator_shutdown_wait_for(uint32_t wait_timeout_in_seconds) {
- int64_t wait_time_in_nanos =
- aws_timestamp_convert(wait_timeout_in_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL);
-
- aws_mutex_lock(&s_global_thread_lock);
- int result = aws_condition_variable_wait_for_pred(
- &s_global_thread_signal, &s_global_thread_lock, wait_time_in_nanos, s_thread_count_zero_pred, NULL);
- aws_mutex_unlock(&s_global_thread_lock);
-
- return result;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/ref_count.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/condition_variable.h>
+#include <aws/common/mutex.h>
+
+void aws_ref_count_init(struct aws_ref_count *ref_count, void *object, aws_simple_completion_callback *on_zero_fn) {
+ aws_atomic_init_int(&ref_count->ref_count, 1);
+ ref_count->object = object;
+ ref_count->on_zero_fn = on_zero_fn;
+}
+
+void *aws_ref_count_acquire(struct aws_ref_count *ref_count) {
+ aws_atomic_fetch_add(&ref_count->ref_count, 1);
+
+ return ref_count->object;
+}
+
+size_t aws_ref_count_release(struct aws_ref_count *ref_count) {
+ size_t old_value = aws_atomic_fetch_sub(&ref_count->ref_count, 1);
+ AWS_ASSERT(old_value > 0 && "refcount has gone negative");
+ if (old_value == 1) {
+ ref_count->on_zero_fn(ref_count->object);
+ }
+
+ return old_value - 1;
+}
+
+static struct aws_condition_variable s_global_thread_signal = AWS_CONDITION_VARIABLE_INIT;
+static struct aws_mutex s_global_thread_lock = AWS_MUTEX_INIT;
+static uint32_t s_global_thread_count = 0;
+
+void aws_global_thread_creator_increment(void) {
+ aws_mutex_lock(&s_global_thread_lock);
+ ++s_global_thread_count;
+ aws_mutex_unlock(&s_global_thread_lock);
+}
+
+void aws_global_thread_creator_decrement(void) {
+ bool signal = false;
+ aws_mutex_lock(&s_global_thread_lock);
+ AWS_ASSERT(s_global_thread_count != 0 && "global tracker has gone negative");
+ --s_global_thread_count;
+ if (s_global_thread_count == 0) {
+ signal = true;
+ }
+ aws_mutex_unlock(&s_global_thread_lock);
+
+ if (signal) {
+ aws_condition_variable_notify_all(&s_global_thread_signal);
+ }
+}
+
+static bool s_thread_count_zero_pred(void *user_data) {
+ (void)user_data;
+
+ return s_global_thread_count == 0;
+}
+
+void aws_global_thread_creator_shutdown_wait(void) {
+ aws_mutex_lock(&s_global_thread_lock);
+ aws_condition_variable_wait_pred(&s_global_thread_signal, &s_global_thread_lock, s_thread_count_zero_pred, NULL);
+ aws_mutex_unlock(&s_global_thread_lock);
+}
+
+int aws_global_thread_creator_shutdown_wait_for(uint32_t wait_timeout_in_seconds) {
+ int64_t wait_time_in_nanos =
+ aws_timestamp_convert(wait_timeout_in_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL);
+
+ aws_mutex_lock(&s_global_thread_lock);
+ int result = aws_condition_variable_wait_for_pred(
+ &s_global_thread_signal, &s_global_thread_lock, wait_time_in_nanos, s_thread_count_zero_pred, NULL);
+ aws_mutex_unlock(&s_global_thread_lock);
+
+ return result;
+}
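
A minimal usage sketch of the ref-count API above; my_object, s_my_object_destroy and main are illustrative only and not part of aws-c-common. The count starts at 1 on init, acquire increments it, and the on-zero callback fires when the last release drops it to zero:

#include <aws/common/ref_count.h>

#include <stdio.h>
#include <stdlib.h>

struct my_object {
    struct aws_ref_count ref_count;
    int value;
};

static void s_my_object_destroy(void *object) {
    struct my_object *obj = object;
    printf("last reference released, destroying object with value %d\n", obj->value);
    free(obj);
}

int main(void) {
    struct my_object *obj = malloc(sizeof(struct my_object));
    if (!obj) {
        return 1;
    }
    obj->value = 42;

    aws_ref_count_init(&obj->ref_count, obj, s_my_object_destroy); /* count = 1 */
    aws_ref_count_acquire(&obj->ref_count);                        /* count = 2 */
    aws_ref_count_release(&obj->ref_count);                        /* count = 1 */
    aws_ref_count_release(&obj->ref_count);                        /* count = 0 -> s_my_object_destroy runs */
    return 0;
}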
diff --git a/contrib/restricted/aws/aws-c-common/source/resource_name.c b/contrib/restricted/aws/aws-c-common/source/resource_name.c
index 7784a7d9aa..0a7b972ea1 100644
--- a/contrib/restricted/aws/aws-c-common/source/resource_name.c
+++ b/contrib/restricted/aws/aws-c-common/source/resource_name.c
@@ -1,111 +1,111 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/resource_name.h>
-
-#define ARN_SPLIT_COUNT ((size_t)5)
-#define ARN_PARTS_COUNT ((size_t)6)
-
-static const char ARN_DELIMETER[] = ":";
-static const char ARN_DELIMETER_CHAR = ':';
-
-static const size_t DELIMETER_LEN = 8; /* strlen("arn:::::") */
-
-AWS_COMMON_API
-int aws_resource_name_init_from_cur(struct aws_resource_name *arn, const struct aws_byte_cursor *input) {
- struct aws_byte_cursor arn_parts[ARN_PARTS_COUNT];
- struct aws_array_list arn_part_list;
- aws_array_list_init_static(&arn_part_list, arn_parts, ARN_PARTS_COUNT, sizeof(struct aws_byte_cursor));
- if (aws_byte_cursor_split_on_char_n(input, ARN_DELIMETER_CHAR, ARN_SPLIT_COUNT, &arn_part_list)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
-
- struct aws_byte_cursor *arn_prefix;
- if (aws_array_list_get_at_ptr(&arn_part_list, (void **)&arn_prefix, 0) ||
- !aws_byte_cursor_eq_c_str(arn_prefix, "arn")) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->partition, 1)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->service, 2)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->region, 3)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->account_id, 4) || aws_byte_cursor_eq_c_str(&arn->account_id, "")) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- if (aws_array_list_get_at(&arn_part_list, &arn->resource_id, 5)) {
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
- return AWS_OP_SUCCESS;
-}
-
-AWS_COMMON_API
-int aws_resource_name_length(const struct aws_resource_name *arn, size_t *size) {
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id));
-
- *size = arn->partition.len + arn->region.len + arn->service.len + arn->account_id.len + arn->resource_id.len +
- DELIMETER_LEN;
-
- return AWS_OP_SUCCESS;
-}
-
-AWS_COMMON_API
-int aws_byte_buf_append_resource_name(struct aws_byte_buf *buf, const struct aws_resource_name *arn) {
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id));
- AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id));
-
- const struct aws_byte_cursor prefix = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:");
- const struct aws_byte_cursor colon_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(ARN_DELIMETER);
-
- if (aws_byte_buf_append(buf, &prefix)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &arn->partition)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &colon_cur)) {
- return aws_raise_error(aws_last_error());
- }
-
- if (aws_byte_buf_append(buf, &arn->service)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &colon_cur)) {
- return aws_raise_error(aws_last_error());
- }
-
- if (aws_byte_buf_append(buf, &arn->region)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &colon_cur)) {
- return aws_raise_error(aws_last_error());
- }
-
- if (aws_byte_buf_append(buf, &arn->account_id)) {
- return aws_raise_error(aws_last_error());
- }
- if (aws_byte_buf_append(buf, &colon_cur)) {
- return aws_raise_error(aws_last_error());
- }
-
- if (aws_byte_buf_append(buf, &arn->resource_id)) {
- return aws_raise_error(aws_last_error());
- }
-
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
- return AWS_OP_SUCCESS;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/resource_name.h>
+
+#define ARN_SPLIT_COUNT ((size_t)5)
+#define ARN_PARTS_COUNT ((size_t)6)
+
+static const char ARN_DELIMETER[] = ":";
+static const char ARN_DELIMETER_CHAR = ':';
+
+static const size_t DELIMETER_LEN = 8; /* strlen("arn:::::") */
+
+AWS_COMMON_API
+int aws_resource_name_init_from_cur(struct aws_resource_name *arn, const struct aws_byte_cursor *input) {
+ struct aws_byte_cursor arn_parts[ARN_PARTS_COUNT];
+ struct aws_array_list arn_part_list;
+ aws_array_list_init_static(&arn_part_list, arn_parts, ARN_PARTS_COUNT, sizeof(struct aws_byte_cursor));
+ if (aws_byte_cursor_split_on_char_n(input, ARN_DELIMETER_CHAR, ARN_SPLIT_COUNT, &arn_part_list)) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+
+ struct aws_byte_cursor *arn_prefix;
+ if (aws_array_list_get_at_ptr(&arn_part_list, (void **)&arn_prefix, 0) ||
+ !aws_byte_cursor_eq_c_str(arn_prefix, "arn")) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+ if (aws_array_list_get_at(&arn_part_list, &arn->partition, 1)) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+ if (aws_array_list_get_at(&arn_part_list, &arn->service, 2)) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+ if (aws_array_list_get_at(&arn_part_list, &arn->region, 3)) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+ if (aws_array_list_get_at(&arn_part_list, &arn->account_id, 4) || aws_byte_cursor_eq_c_str(&arn->account_id, "")) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+ if (aws_array_list_get_at(&arn_part_list, &arn->resource_id, 5)) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+ return AWS_OP_SUCCESS;
+}
+
+AWS_COMMON_API
+int aws_resource_name_length(const struct aws_resource_name *arn, size_t *size) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id));
+
+ *size = arn->partition.len + arn->region.len + arn->service.len + arn->account_id.len + arn->resource_id.len +
+ DELIMETER_LEN;
+
+ return AWS_OP_SUCCESS;
+}
+
+AWS_COMMON_API
+int aws_byte_buf_append_resource_name(struct aws_byte_buf *buf, const struct aws_resource_name *arn) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id));
+
+ const struct aws_byte_cursor prefix = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:");
+ const struct aws_byte_cursor colon_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(ARN_DELIMETER);
+
+ if (aws_byte_buf_append(buf, &prefix)) {
+ return aws_raise_error(aws_last_error());
+ }
+ if (aws_byte_buf_append(buf, &arn->partition)) {
+ return aws_raise_error(aws_last_error());
+ }
+ if (aws_byte_buf_append(buf, &colon_cur)) {
+ return aws_raise_error(aws_last_error());
+ }
+
+ if (aws_byte_buf_append(buf, &arn->service)) {
+ return aws_raise_error(aws_last_error());
+ }
+ if (aws_byte_buf_append(buf, &colon_cur)) {
+ return aws_raise_error(aws_last_error());
+ }
+
+ if (aws_byte_buf_append(buf, &arn->region)) {
+ return aws_raise_error(aws_last_error());
+ }
+ if (aws_byte_buf_append(buf, &colon_cur)) {
+ return aws_raise_error(aws_last_error());
+ }
+
+ if (aws_byte_buf_append(buf, &arn->account_id)) {
+ return aws_raise_error(aws_last_error());
+ }
+ if (aws_byte_buf_append(buf, &colon_cur)) {
+ return aws_raise_error(aws_last_error());
+ }
+
+ if (aws_byte_buf_append(buf, &arn->resource_id)) {
+ return aws_raise_error(aws_last_error());
+ }
+
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
+ return AWS_OP_SUCCESS;
+}
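
A short sketch of how the ARN helpers above compose. The ARN string and main are illustrative only; aws_byte_cursor_from_c_str, aws_byte_buf_init and aws_default_allocator come from other aws-c-common headers, not this file:

#include <aws/common/byte_buf.h>
#include <aws/common/common.h>
#include <aws/common/resource_name.h>

#include <stdio.h>

int main(void) {
    /* Hypothetical ARN; the parser expects six ':'-separated fields after "arn". */
    struct aws_byte_cursor input =
        aws_byte_cursor_from_c_str("arn:aws:lambda:us-east-1:123456789012:function:my-fn");

    struct aws_resource_name arn;
    if (aws_resource_name_init_from_cur(&arn, &input)) {
        return 1; /* AWS_ERROR_MALFORMED_INPUT_STRING was raised */
    }
    printf("service: %.*s\n", (int)arn.service.len, (const char *)arn.service.ptr);

    /* Round-trip: size the buffer with aws_resource_name_length, then re-serialize. */
    size_t required = 0;
    aws_resource_name_length(&arn, &required);

    struct aws_byte_buf out;
    if (aws_byte_buf_init(&out, aws_default_allocator(), required)) {
        return 1;
    }
    aws_byte_buf_append_resource_name(&out, &arn);
    printf("rebuilt: %.*s\n", (int)out.len, (const char *)out.buffer);

    aws_byte_buf_clean_up(&out);
    return 0;
}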
diff --git a/contrib/restricted/aws/aws-c-common/source/ring_buffer.c b/contrib/restricted/aws/aws-c-common/source/ring_buffer.c
index 56148f011c..6ebecebf47 100644
--- a/contrib/restricted/aws/aws-c-common/source/ring_buffer.c
+++ b/contrib/restricted/aws/aws-c-common/source/ring_buffer.c
@@ -1,321 +1,321 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/ring_buffer.h>
-
-#include <aws/common/byte_buf.h>
-
-#ifdef CBMC
-# define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order) \
- dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order); \
- assert(__CPROVER_same_object(dest_ptr, ring_buf->allocation)); \
- assert(aws_ring_buffer_check_atomic_ptr(ring_buf, dest_ptr));
-# define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order) \
- assert(aws_ring_buffer_check_atomic_ptr(ring_buf, src_ptr)); \
- aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order);
-#else
-# define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order) \
- dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order);
-# define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order) \
- aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order);
-#endif
-#define AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, dest_ptr) \
- AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->tail, aws_memory_order_acquire);
-#define AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, src_ptr) \
- AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->tail, src_ptr, aws_memory_order_release);
-#define AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, dest_ptr) \
- AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->head, aws_memory_order_relaxed);
-#define AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, src_ptr) \
- AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->head, src_ptr, aws_memory_order_relaxed);
-
-int aws_ring_buffer_init(struct aws_ring_buffer *ring_buf, struct aws_allocator *allocator, size_t size) {
- AWS_PRECONDITION(ring_buf != NULL);
- AWS_PRECONDITION(allocator != NULL);
- AWS_PRECONDITION(size > 0);
-
- AWS_ZERO_STRUCT(*ring_buf);
-
- ring_buf->allocation = aws_mem_acquire(allocator, size);
-
- if (!ring_buf->allocation) {
- return AWS_OP_ERR;
- }
-
- ring_buf->allocator = allocator;
- aws_atomic_init_ptr(&ring_buf->head, ring_buf->allocation);
- aws_atomic_init_ptr(&ring_buf->tail, ring_buf->allocation);
- ring_buf->allocation_end = ring_buf->allocation + size;
-
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- return AWS_OP_SUCCESS;
-}
-
-void aws_ring_buffer_clean_up(struct aws_ring_buffer *ring_buf) {
- AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
- if (ring_buf->allocation) {
- aws_mem_release(ring_buf->allocator, ring_buf->allocation);
- }
-
- AWS_ZERO_STRUCT(*ring_buf);
-}
-
-int aws_ring_buffer_acquire(struct aws_ring_buffer *ring_buf, size_t requested_size, struct aws_byte_buf *dest) {
- AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
- AWS_ERROR_PRECONDITION(requested_size != 0);
-
- uint8_t *tail_cpy;
- uint8_t *head_cpy;
- AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy);
- AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy);
-
- /* in this branch, we don't have any vended buffers. */
- if (head_cpy == tail_cpy) {
- size_t ring_space = ring_buf->allocation_end - ring_buf->allocation;
-
- if (requested_size > ring_space) {
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return aws_raise_error(AWS_ERROR_OOM);
- }
- AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
- AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation);
- *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return AWS_OP_SUCCESS;
- }
-
- /* you'll constantly bounce between the next two branches as the ring buffer is traversed. */
- /* after N + 1 wraps */
- if (tail_cpy > head_cpy) {
- size_t space = tail_cpy - head_cpy - 1;
-
- if (space >= requested_size) {
- AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
- *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return AWS_OP_SUCCESS;
- }
- /* After N wraps */
- } else if (tail_cpy < head_cpy) {
- /* prefer the head space for efficiency. */
- if ((size_t)(ring_buf->allocation_end - head_cpy) >= requested_size) {
- AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
- *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return AWS_OP_SUCCESS;
- }
-
- if ((size_t)(tail_cpy - ring_buf->allocation) > requested_size) {
- AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
- *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return AWS_OP_SUCCESS;
- }
- }
-
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return aws_raise_error(AWS_ERROR_OOM);
-}
-
-int aws_ring_buffer_acquire_up_to(
- struct aws_ring_buffer *ring_buf,
- size_t minimum_size,
- size_t requested_size,
- struct aws_byte_buf *dest) {
- AWS_PRECONDITION(requested_size >= minimum_size);
- AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
-
- if (requested_size == 0 || minimum_size == 0 || !ring_buf || !dest) {
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- uint8_t *tail_cpy;
- uint8_t *head_cpy;
- AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy);
- AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy);
-
- /* in this branch, we don't have any vended buffers. */
- if (head_cpy == tail_cpy) {
- size_t ring_space = ring_buf->allocation_end - ring_buf->allocation;
-
- size_t allocation_size = ring_space > requested_size ? requested_size : ring_space;
-
- if (allocation_size < minimum_size) {
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return aws_raise_error(AWS_ERROR_OOM);
- }
-
- /* go as big as we can. */
- /* we don't have any vended buffers, so this should be safe. */
- AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + allocation_size);
- AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation);
- *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, allocation_size);
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return AWS_OP_SUCCESS;
- }
- /* you'll constantly bounce between the next two branches as the ring buffer is traversed. */
- /* after N + 1 wraps */
- if (tail_cpy > head_cpy) {
- size_t space = tail_cpy - head_cpy;
- /* this shouldn't be possible. */
- AWS_ASSERT(space);
- space -= 1;
-
- size_t returnable_size = space > requested_size ? requested_size : space;
-
- if (returnable_size >= minimum_size) {
- AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + returnable_size);
- *dest = aws_byte_buf_from_empty_array(head_cpy, returnable_size);
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return AWS_OP_SUCCESS;
- }
- /* after N wraps */
- } else if (tail_cpy < head_cpy) {
- size_t head_space = ring_buf->allocation_end - head_cpy;
- size_t tail_space = tail_cpy - ring_buf->allocation;
-
- /* if you can vend the whole thing do it. Also prefer head space to tail space. */
- if (head_space >= requested_size) {
- AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
- *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return AWS_OP_SUCCESS;
- }
-
- if (tail_space > requested_size) {
- AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
- *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return AWS_OP_SUCCESS;
- }
-
- /* now vend as much as possible, once again preferring head space. */
- if (head_space >= minimum_size && head_space >= tail_space) {
- AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + head_space);
- *dest = aws_byte_buf_from_empty_array(head_cpy, head_space);
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return AWS_OP_SUCCESS;
- }
-
- if (tail_space > minimum_size) {
- AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + tail_space - 1);
- *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, tail_space - 1);
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return AWS_OP_SUCCESS;
- }
- }
-
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
- return aws_raise_error(AWS_ERROR_OOM);
-}
-
-static inline bool s_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) {
-#ifdef CBMC
- /* only continue if buf points-into ring_buffer because comparison of pointers to different objects is undefined
- * (C11 6.5.8) */
- if (!__CPROVER_same_object(buf->buffer, ring_buffer->allocation) ||
- !__CPROVER_same_object(buf->buffer, ring_buffer->allocation_end - 1)) {
- return false;
- }
-#endif
- return buf->buffer && ring_buffer->allocation && ring_buffer->allocation_end &&
- buf->buffer >= ring_buffer->allocation && buf->buffer + buf->capacity <= ring_buffer->allocation_end;
-}
-
-void aws_ring_buffer_release(struct aws_ring_buffer *ring_buffer, struct aws_byte_buf *buf) {
- AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer));
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- AWS_PRECONDITION(s_buf_belongs_to_pool(ring_buffer, buf));
- AWS_ATOMIC_STORE_TAIL_PTR(ring_buffer, buf->buffer + buf->capacity);
- AWS_ZERO_STRUCT(*buf);
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer));
-}
-
-bool aws_ring_buffer_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) {
- AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer));
- AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
- bool rval = s_buf_belongs_to_pool(ring_buffer, buf);
- AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer));
- AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
- return rval;
-}
-
-/* Ring buffer allocator implementation */
-static void *s_ring_buffer_mem_acquire(struct aws_allocator *allocator, size_t size) {
- struct aws_ring_buffer *buffer = allocator->impl;
- struct aws_byte_buf buf;
- AWS_ZERO_STRUCT(buf);
- /* allocate extra space for the size */
- if (aws_ring_buffer_acquire(buffer, size + sizeof(size_t), &buf)) {
- return NULL;
- }
- /* store the size ahead of the allocation */
- *((size_t *)buf.buffer) = buf.capacity;
- return buf.buffer + sizeof(size_t);
-}
-
-static void s_ring_buffer_mem_release(struct aws_allocator *allocator, void *ptr) {
- /* back up to where the size is stored */
- const void *addr = ((uint8_t *)ptr - sizeof(size_t));
- const size_t size = *((size_t *)addr);
-
- struct aws_byte_buf buf = aws_byte_buf_from_array(addr, size);
- buf.allocator = allocator;
-
- struct aws_ring_buffer *buffer = allocator->impl;
- aws_ring_buffer_release(buffer, &buf);
-}
-
-static void *s_ring_buffer_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
- void *mem = s_ring_buffer_mem_acquire(allocator, num * size);
- if (!mem) {
- return NULL;
- }
- memset(mem, 0, num * size);
- return mem;
-}
-
-static void *s_ring_buffer_mem_realloc(struct aws_allocator *allocator, void *ptr, size_t old_size, size_t new_size) {
- (void)allocator;
- (void)ptr;
- (void)old_size;
- (void)new_size;
- AWS_FATAL_ASSERT(!"ring_buffer_allocator does not support realloc, as it breaks allocation ordering");
- return NULL;
-}
-
-int aws_ring_buffer_allocator_init(struct aws_allocator *allocator, struct aws_ring_buffer *ring_buffer) {
- if (allocator == NULL || ring_buffer == NULL) {
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- allocator->impl = ring_buffer;
- allocator->mem_acquire = s_ring_buffer_mem_acquire;
- allocator->mem_release = s_ring_buffer_mem_release;
- allocator->mem_calloc = s_ring_buffer_mem_calloc;
- allocator->mem_realloc = s_ring_buffer_mem_realloc;
- return AWS_OP_SUCCESS;
-}
-
-void aws_ring_buffer_allocator_clean_up(struct aws_allocator *allocator) {
- AWS_ZERO_STRUCT(*allocator);
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/ring_buffer.h>
+
+#include <aws/common/byte_buf.h>
+
+#ifdef CBMC
+# define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order) \
+ dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order); \
+ assert(__CPROVER_same_object(dest_ptr, ring_buf->allocation)); \
+ assert(aws_ring_buffer_check_atomic_ptr(ring_buf, dest_ptr));
+# define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order) \
+ assert(aws_ring_buffer_check_atomic_ptr(ring_buf, src_ptr)); \
+ aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order);
+#else
+# define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order) \
+ dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order);
+# define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order) \
+ aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order);
+#endif
+#define AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, dest_ptr) \
+ AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->tail, aws_memory_order_acquire);
+#define AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, src_ptr) \
+ AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->tail, src_ptr, aws_memory_order_release);
+#define AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, dest_ptr) \
+ AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->head, aws_memory_order_relaxed);
+#define AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, src_ptr) \
+ AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->head, src_ptr, aws_memory_order_relaxed);
+
+int aws_ring_buffer_init(struct aws_ring_buffer *ring_buf, struct aws_allocator *allocator, size_t size) {
+ AWS_PRECONDITION(ring_buf != NULL);
+ AWS_PRECONDITION(allocator != NULL);
+ AWS_PRECONDITION(size > 0);
+
+ AWS_ZERO_STRUCT(*ring_buf);
+
+ ring_buf->allocation = aws_mem_acquire(allocator, size);
+
+ if (!ring_buf->allocation) {
+ return AWS_OP_ERR;
+ }
+
+ ring_buf->allocator = allocator;
+ aws_atomic_init_ptr(&ring_buf->head, ring_buf->allocation);
+ aws_atomic_init_ptr(&ring_buf->tail, ring_buf->allocation);
+ ring_buf->allocation_end = ring_buf->allocation + size;
+
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ return AWS_OP_SUCCESS;
+}
+
+void aws_ring_buffer_clean_up(struct aws_ring_buffer *ring_buf) {
+ AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
+ if (ring_buf->allocation) {
+ aws_mem_release(ring_buf->allocator, ring_buf->allocation);
+ }
+
+ AWS_ZERO_STRUCT(*ring_buf);
+}
+
+int aws_ring_buffer_acquire(struct aws_ring_buffer *ring_buf, size_t requested_size, struct aws_byte_buf *dest) {
+ AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
+ AWS_ERROR_PRECONDITION(requested_size != 0);
+
+ uint8_t *tail_cpy;
+ uint8_t *head_cpy;
+ AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy);
+ AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy);
+
+ /* in this branch, we don't have any vended buffers. */
+ if (head_cpy == tail_cpy) {
+ size_t ring_space = ring_buf->allocation_end - ring_buf->allocation;
+
+ if (requested_size > ring_space) {
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return aws_raise_error(AWS_ERROR_OOM);
+ }
+ AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
+ AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation);
+ *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return AWS_OP_SUCCESS;
+ }
+
+ /* you'll constantly bounce between the next two branches as the ring buffer is traversed. */
+ /* after N + 1 wraps */
+ if (tail_cpy > head_cpy) {
+ size_t space = tail_cpy - head_cpy - 1;
+
+ if (space >= requested_size) {
+ AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
+ *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return AWS_OP_SUCCESS;
+ }
+ /* After N wraps */
+ } else if (tail_cpy < head_cpy) {
+ /* prefer the head space for efficiency. */
+ if ((size_t)(ring_buf->allocation_end - head_cpy) >= requested_size) {
+ AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
+ *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return AWS_OP_SUCCESS;
+ }
+
+ if ((size_t)(tail_cpy - ring_buf->allocation) > requested_size) {
+ AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
+ *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return AWS_OP_SUCCESS;
+ }
+ }
+
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return aws_raise_error(AWS_ERROR_OOM);
+}
+
+int aws_ring_buffer_acquire_up_to(
+ struct aws_ring_buffer *ring_buf,
+ size_t minimum_size,
+ size_t requested_size,
+ struct aws_byte_buf *dest) {
+ AWS_PRECONDITION(requested_size >= minimum_size);
+ AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
+
+ if (requested_size == 0 || minimum_size == 0 || !ring_buf || !dest) {
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ uint8_t *tail_cpy;
+ uint8_t *head_cpy;
+ AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy);
+ AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy);
+
+ /* in this branch, we don't have any vended buffers. */
+ if (head_cpy == tail_cpy) {
+ size_t ring_space = ring_buf->allocation_end - ring_buf->allocation;
+
+ size_t allocation_size = ring_space > requested_size ? requested_size : ring_space;
+
+ if (allocation_size < minimum_size) {
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return aws_raise_error(AWS_ERROR_OOM);
+ }
+
+ /* go as big as we can. */
+ /* we don't have any vended buffers, so this should be safe. */
+ AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + allocation_size);
+ AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation);
+ *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, allocation_size);
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return AWS_OP_SUCCESS;
+ }
+ /* you'll constantly bounce between the next two branches as the ring buffer is traversed. */
+ /* after N + 1 wraps */
+ if (tail_cpy > head_cpy) {
+ size_t space = tail_cpy - head_cpy;
+ /* this shouldn't be possible. */
+ AWS_ASSERT(space);
+ space -= 1;
+
+ size_t returnable_size = space > requested_size ? requested_size : space;
+
+ if (returnable_size >= minimum_size) {
+ AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + returnable_size);
+ *dest = aws_byte_buf_from_empty_array(head_cpy, returnable_size);
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return AWS_OP_SUCCESS;
+ }
+ /* after N wraps */
+ } else if (tail_cpy < head_cpy) {
+ size_t head_space = ring_buf->allocation_end - head_cpy;
+ size_t tail_space = tail_cpy - ring_buf->allocation;
+
+ /* if you can vend the whole thing do it. Also prefer head space to tail space. */
+ if (head_space >= requested_size) {
+ AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
+ *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return AWS_OP_SUCCESS;
+ }
+
+ if (tail_space > requested_size) {
+ AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
+ *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return AWS_OP_SUCCESS;
+ }
+
+ /* now vend as much as possible, once again preferring head space. */
+ if (head_space >= minimum_size && head_space >= tail_space) {
+ AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + head_space);
+ *dest = aws_byte_buf_from_empty_array(head_cpy, head_space);
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return AWS_OP_SUCCESS;
+ }
+
+ if (tail_space > minimum_size) {
+ AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + tail_space - 1);
+ *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, tail_space - 1);
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return AWS_OP_SUCCESS;
+ }
+ }
+
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
+ return aws_raise_error(AWS_ERROR_OOM);
+}
+
+static inline bool s_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) {
+#ifdef CBMC
+ /* only continue if buf points-into ring_buffer because comparison of pointers to different objects is undefined
+ * (C11 6.5.8) */
+ if (!__CPROVER_same_object(buf->buffer, ring_buffer->allocation) ||
+ !__CPROVER_same_object(buf->buffer, ring_buffer->allocation_end - 1)) {
+ return false;
+ }
+#endif
+ return buf->buffer && ring_buffer->allocation && ring_buffer->allocation_end &&
+ buf->buffer >= ring_buffer->allocation && buf->buffer + buf->capacity <= ring_buffer->allocation_end;
+}
+
+void aws_ring_buffer_release(struct aws_ring_buffer *ring_buffer, struct aws_byte_buf *buf) {
+ AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer));
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ AWS_PRECONDITION(s_buf_belongs_to_pool(ring_buffer, buf));
+ AWS_ATOMIC_STORE_TAIL_PTR(ring_buffer, buf->buffer + buf->capacity);
+ AWS_ZERO_STRUCT(*buf);
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer));
+}
+
+bool aws_ring_buffer_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) {
+ AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer));
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ bool rval = s_buf_belongs_to_pool(ring_buffer, buf);
+ AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer));
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
+ return rval;
+}
+
+/* Ring buffer allocator implementation */
+static void *s_ring_buffer_mem_acquire(struct aws_allocator *allocator, size_t size) {
+ struct aws_ring_buffer *buffer = allocator->impl;
+ struct aws_byte_buf buf;
+ AWS_ZERO_STRUCT(buf);
+ /* allocate extra space for the size */
+ if (aws_ring_buffer_acquire(buffer, size + sizeof(size_t), &buf)) {
+ return NULL;
+ }
+ /* store the size ahead of the allocation */
+ *((size_t *)buf.buffer) = buf.capacity;
+ return buf.buffer + sizeof(size_t);
+}
+
+static void s_ring_buffer_mem_release(struct aws_allocator *allocator, void *ptr) {
+ /* back up to where the size is stored */
+ const void *addr = ((uint8_t *)ptr - sizeof(size_t));
+ const size_t size = *((size_t *)addr);
+
+ struct aws_byte_buf buf = aws_byte_buf_from_array(addr, size);
+ buf.allocator = allocator;
+
+ struct aws_ring_buffer *buffer = allocator->impl;
+ aws_ring_buffer_release(buffer, &buf);
+}
+
+static void *s_ring_buffer_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
+ void *mem = s_ring_buffer_mem_acquire(allocator, num * size);
+ if (!mem) {
+ return NULL;
+ }
+ memset(mem, 0, num * size);
+ return mem;
+}
+
+static void *s_ring_buffer_mem_realloc(struct aws_allocator *allocator, void *ptr, size_t old_size, size_t new_size) {
+ (void)allocator;
+ (void)ptr;
+ (void)old_size;
+ (void)new_size;
+ AWS_FATAL_ASSERT(!"ring_buffer_allocator does not support realloc, as it breaks allocation ordering");
+ return NULL;
+}
+
+int aws_ring_buffer_allocator_init(struct aws_allocator *allocator, struct aws_ring_buffer *ring_buffer) {
+ if (allocator == NULL || ring_buffer == NULL) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ allocator->impl = ring_buffer;
+ allocator->mem_acquire = s_ring_buffer_mem_acquire;
+ allocator->mem_release = s_ring_buffer_mem_release;
+ allocator->mem_calloc = s_ring_buffer_mem_calloc;
+ allocator->mem_realloc = s_ring_buffer_mem_realloc;
+ return AWS_OP_SUCCESS;
+}
+
+void aws_ring_buffer_allocator_clean_up(struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*allocator);
+}
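
A minimal sketch of the ring buffer in use; the sizes and payload are arbitrary. As the release path above shows, releasing a slice simply advances the tail past it, so callers are expected to release buffers in the order they were acquired:

#include <aws/common/byte_buf.h>
#include <aws/common/common.h>
#include <aws/common/ring_buffer.h>

#include <string.h>

int main(void) {
    struct aws_ring_buffer ring;
    if (aws_ring_buffer_init(&ring, aws_default_allocator(), 1024)) {
        return 1;
    }

    /* Acquire a 128-byte slice of the ring's backing allocation. */
    struct aws_byte_buf slice;
    AWS_ZERO_STRUCT(slice);
    if (aws_ring_buffer_acquire(&ring, 128, &slice)) {
        aws_ring_buffer_clean_up(&ring);
        return 1; /* AWS_ERROR_OOM: no contiguous region large enough */
    }

    memcpy(slice.buffer, "hello", 5);
    slice.len = 5;

    /* Releasing advances the tail pointer past this slice and zeroes the byte_buf. */
    aws_ring_buffer_release(&ring, &slice);

    aws_ring_buffer_clean_up(&ring);
    return 0;
}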
diff --git a/contrib/restricted/aws/aws-c-common/source/statistics.c b/contrib/restricted/aws/aws-c-common/source/statistics.c
index 7185cfeb5f..3d8e50d6e1 100644
--- a/contrib/restricted/aws/aws-c-common/source/statistics.c
+++ b/contrib/restricted/aws/aws-c-common/source/statistics.c
@@ -1,26 +1,26 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/statistics.h>
-
-void aws_crt_statistics_handler_process_statistics(
- struct aws_crt_statistics_handler *handler,
- struct aws_crt_statistics_sample_interval *interval,
- struct aws_array_list *stats,
- void *context) {
- handler->vtable->process_statistics(handler, interval, stats, context);
-}
-
-uint64_t aws_crt_statistics_handler_get_report_interval_ms(struct aws_crt_statistics_handler *handler) {
- return handler->vtable->get_report_interval_ms(handler);
-}
-
-void aws_crt_statistics_handler_destroy(struct aws_crt_statistics_handler *handler) {
- if (handler == NULL) {
- return;
- }
-
- handler->vtable->destroy(handler);
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/statistics.h>
+
+void aws_crt_statistics_handler_process_statistics(
+ struct aws_crt_statistics_handler *handler,
+ struct aws_crt_statistics_sample_interval *interval,
+ struct aws_array_list *stats,
+ void *context) {
+ handler->vtable->process_statistics(handler, interval, stats, context);
+}
+
+uint64_t aws_crt_statistics_handler_get_report_interval_ms(struct aws_crt_statistics_handler *handler) {
+ return handler->vtable->get_report_interval_ms(handler);
+}
+
+void aws_crt_statistics_handler_destroy(struct aws_crt_statistics_handler *handler) {
+ if (handler == NULL) {
+ return;
+ }
+
+ handler->vtable->destroy(handler);
+}
diff --git a/contrib/restricted/aws/aws-c-common/source/string.c b/contrib/restricted/aws/aws-c-common/source/string.c
index 26ab59cf1a..d1abf0dbff 100644
--- a/contrib/restricted/aws/aws-c-common/source/string.c
+++ b/contrib/restricted/aws/aws-c-common/source/string.c
@@ -1,17 +1,17 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/string.h>
struct aws_string *aws_string_new_from_c_str(struct aws_allocator *allocator, const char *c_str) {
- AWS_PRECONDITION(allocator && c_str);
+ AWS_PRECONDITION(allocator && c_str);
return aws_string_new_from_array(allocator, (const uint8_t *)c_str, strlen(c_str));
}
struct aws_string *aws_string_new_from_array(struct aws_allocator *allocator, const uint8_t *bytes, size_t len) {
- AWS_PRECONDITION(allocator);
- AWS_PRECONDITION(AWS_MEM_IS_READABLE(bytes, len));
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(AWS_MEM_IS_READABLE(bytes, len));
size_t malloc_size;
if (aws_add_size_checked(sizeof(struct aws_string) + 1, len, &malloc_size)) {
return NULL;
@@ -24,37 +24,37 @@ struct aws_string *aws_string_new_from_array(struct aws_allocator *allocator, co
/* Fields are declared const, so we need to copy them in like this */
*(struct aws_allocator **)(&str->allocator) = allocator;
*(size_t *)(&str->len) = len;
- if (len > 0) {
- memcpy((void *)str->bytes, bytes, len);
- }
+ if (len > 0) {
+ memcpy((void *)str->bytes, bytes, len);
+ }
*(uint8_t *)&str->bytes[len] = '\0';
- AWS_RETURN_WITH_POSTCONDITION(str, aws_string_is_valid(str));
+ AWS_RETURN_WITH_POSTCONDITION(str, aws_string_is_valid(str));
}
struct aws_string *aws_string_new_from_string(struct aws_allocator *allocator, const struct aws_string *str) {
- AWS_PRECONDITION(allocator && aws_string_is_valid(str));
+ AWS_PRECONDITION(allocator && aws_string_is_valid(str));
return aws_string_new_from_array(allocator, str->bytes, str->len);
}
-struct aws_string *aws_string_new_from_cursor(struct aws_allocator *allocator, const struct aws_byte_cursor *cursor) {
- AWS_PRECONDITION(allocator && aws_byte_cursor_is_valid(cursor));
- return aws_string_new_from_array(allocator, cursor->ptr, cursor->len);
-}
-
-struct aws_string *aws_string_new_from_buf(struct aws_allocator *allocator, const struct aws_byte_buf *buf) {
- AWS_PRECONDITION(allocator && aws_byte_buf_is_valid(buf));
- return aws_string_new_from_array(allocator, buf->buffer, buf->len);
-}
-
+struct aws_string *aws_string_new_from_cursor(struct aws_allocator *allocator, const struct aws_byte_cursor *cursor) {
+ AWS_PRECONDITION(allocator && aws_byte_cursor_is_valid(cursor));
+ return aws_string_new_from_array(allocator, cursor->ptr, cursor->len);
+}
+
+struct aws_string *aws_string_new_from_buf(struct aws_allocator *allocator, const struct aws_byte_buf *buf) {
+ AWS_PRECONDITION(allocator && aws_byte_buf_is_valid(buf));
+ return aws_string_new_from_array(allocator, buf->buffer, buf->len);
+}
+
void aws_string_destroy(struct aws_string *str) {
- AWS_PRECONDITION(!str || aws_string_is_valid(str));
+ AWS_PRECONDITION(!str || aws_string_is_valid(str));
if (str && str->allocator) {
aws_mem_release(str->allocator, str);
}
}
void aws_string_destroy_secure(struct aws_string *str) {
- AWS_PRECONDITION(!str || aws_string_is_valid(str));
+ AWS_PRECONDITION(!str || aws_string_is_valid(str));
if (str) {
aws_secure_zero((void *)aws_string_bytes(str), str->len);
if (str->allocator) {
@@ -64,25 +64,25 @@ void aws_string_destroy_secure(struct aws_string *str) {
}
int aws_string_compare(const struct aws_string *a, const struct aws_string *b) {
- AWS_PRECONDITION(!a || aws_string_is_valid(a));
- AWS_PRECONDITION(!b || aws_string_is_valid(b));
- if (a == b) {
- return 0; /* strings identical */
- }
- if (a == NULL) {
- return -1;
- }
- if (b == NULL) {
- return 1;
- }
-
+ AWS_PRECONDITION(!a || aws_string_is_valid(a));
+ AWS_PRECONDITION(!b || aws_string_is_valid(b));
+ if (a == b) {
+ return 0; /* strings identical */
+ }
+ if (a == NULL) {
+ return -1;
+ }
+ if (b == NULL) {
+ return 1;
+ }
+
size_t len_a = a->len;
size_t len_b = b->len;
size_t min_len = len_a < len_b ? len_a : len_b;
int ret = memcmp(aws_string_bytes(a), aws_string_bytes(b), min_len);
- AWS_POSTCONDITION(aws_string_is_valid(a));
- AWS_POSTCONDITION(aws_string_is_valid(b));
+ AWS_POSTCONDITION(aws_string_is_valid(a));
+ AWS_POSTCONDITION(aws_string_is_valid(b));
if (ret) {
return ret; /* overlapping characters differ */
}
@@ -96,183 +96,183 @@ int aws_string_compare(const struct aws_string *a, const struct aws_string *b) {
}
int aws_array_list_comparator_string(const void *a, const void *b) {
- if (a == b) {
- return 0; /* strings identical */
- }
- if (a == NULL) {
- return -1;
- }
- if (b == NULL) {
- return 1;
- }
+ if (a == b) {
+ return 0; /* strings identical */
+ }
+ if (a == NULL) {
+ return -1;
+ }
+ if (b == NULL) {
+ return 1;
+ }
const struct aws_string *str_a = *(const struct aws_string **)a;
const struct aws_string *str_b = *(const struct aws_string **)b;
return aws_string_compare(str_a, str_b);
}
-
-/**
- * Returns true if bytes of string are the same, false otherwise.
- */
-bool aws_string_eq(const struct aws_string *a, const struct aws_string *b) {
- AWS_PRECONDITION(!a || aws_string_is_valid(a));
- AWS_PRECONDITION(!b || aws_string_is_valid(b));
- if (a == b) {
- return true;
- }
- if (a == NULL || b == NULL) {
- return false;
- }
- return aws_array_eq(a->bytes, a->len, b->bytes, b->len);
-}
-
-/**
- * Returns true if bytes of string are equivalent, using a case-insensitive comparison.
- */
-bool aws_string_eq_ignore_case(const struct aws_string *a, const struct aws_string *b) {
- AWS_PRECONDITION(!a || aws_string_is_valid(a));
- AWS_PRECONDITION(!b || aws_string_is_valid(b));
- if (a == b) {
- return true;
- }
- if (a == NULL || b == NULL) {
- return false;
- }
- return aws_array_eq_ignore_case(a->bytes, a->len, b->bytes, b->len);
-}
-
-/**
- * Returns true if bytes of string and cursor are the same, false otherwise.
- */
-bool aws_string_eq_byte_cursor(const struct aws_string *str, const struct aws_byte_cursor *cur) {
- AWS_PRECONDITION(!str || aws_string_is_valid(str));
- AWS_PRECONDITION(!cur || aws_byte_cursor_is_valid(cur));
- if (str == NULL && cur == NULL) {
- return true;
- }
- if (str == NULL || cur == NULL) {
- return false;
- }
- return aws_array_eq(str->bytes, str->len, cur->ptr, cur->len);
-}
-
-/**
- * Returns true if bytes of string and cursor are equivalent, using a case-insensitive comparison.
- */
-
-bool aws_string_eq_byte_cursor_ignore_case(const struct aws_string *str, const struct aws_byte_cursor *cur) {
- AWS_PRECONDITION(!str || aws_string_is_valid(str));
- AWS_PRECONDITION(!cur || aws_byte_cursor_is_valid(cur));
- if (str == NULL && cur == NULL) {
- return true;
- }
- if (str == NULL || cur == NULL) {
- return false;
- }
- return aws_array_eq_ignore_case(str->bytes, str->len, cur->ptr, cur->len);
-}
-
-/**
- * Returns true if bytes of string and buffer are the same, false otherwise.
- */
-bool aws_string_eq_byte_buf(const struct aws_string *str, const struct aws_byte_buf *buf) {
- AWS_PRECONDITION(!str || aws_string_is_valid(str));
- AWS_PRECONDITION(!buf || aws_byte_buf_is_valid(buf));
- if (str == NULL && buf == NULL) {
- return true;
- }
- if (str == NULL || buf == NULL) {
- return false;
- }
- return aws_array_eq(str->bytes, str->len, buf->buffer, buf->len);
-}
-
-/**
- * Returns true if bytes of string and buffer are equivalent, using a case-insensitive comparison.
- */
-
-bool aws_string_eq_byte_buf_ignore_case(const struct aws_string *str, const struct aws_byte_buf *buf) {
- AWS_PRECONDITION(!str || aws_string_is_valid(str));
- AWS_PRECONDITION(!buf || aws_byte_buf_is_valid(buf));
- if (str == NULL && buf == NULL) {
- return true;
- }
- if (str == NULL || buf == NULL) {
- return false;
- }
- return aws_array_eq_ignore_case(str->bytes, str->len, buf->buffer, buf->len);
-}
-
-bool aws_string_eq_c_str(const struct aws_string *str, const char *c_str) {
- AWS_PRECONDITION(!str || aws_string_is_valid(str));
- if (str == NULL && c_str == NULL) {
- return true;
- }
- if (str == NULL || c_str == NULL) {
- return false;
- }
- return aws_array_eq_c_str(str->bytes, str->len, c_str);
-}
-
-/**
- * Returns true if bytes of strings are equivalent, using a case-insensitive comparison.
- */
-bool aws_string_eq_c_str_ignore_case(const struct aws_string *str, const char *c_str) {
- AWS_PRECONDITION(!str || aws_string_is_valid(str));
- if (str == NULL && c_str == NULL) {
- return true;
- }
- if (str == NULL || c_str == NULL) {
- return false;
- }
- return aws_array_eq_c_str_ignore_case(str->bytes, str->len, c_str);
-}
-
-bool aws_byte_buf_write_from_whole_string(
- struct aws_byte_buf *AWS_RESTRICT buf,
- const struct aws_string *AWS_RESTRICT src) {
- AWS_PRECONDITION(!buf || aws_byte_buf_is_valid(buf));
- AWS_PRECONDITION(!src || aws_string_is_valid(src));
- if (buf == NULL || src == NULL) {
- return false;
- }
- return aws_byte_buf_write(buf, aws_string_bytes(src), src->len);
-}
-
-/**
- * Creates an aws_byte_cursor from an existing string.
- */
-struct aws_byte_cursor aws_byte_cursor_from_string(const struct aws_string *src) {
- AWS_PRECONDITION(aws_string_is_valid(src));
- return aws_byte_cursor_from_array(aws_string_bytes(src), src->len);
-}
-
-struct aws_string *aws_string_clone_or_reuse(struct aws_allocator *allocator, const struct aws_string *str) {
- AWS_PRECONDITION(allocator);
- AWS_PRECONDITION(aws_string_is_valid(str));
-
- if (str->allocator == NULL) {
- /* Since the string cannot be deallocated, we assume that it will remain valid for the lifetime of the
- * application */
- AWS_POSTCONDITION(aws_string_is_valid(str));
- return (struct aws_string *)str;
- }
-
- AWS_POSTCONDITION(aws_string_is_valid(str));
- return aws_string_new_from_string(allocator, str);
-}
-
-int aws_secure_strlen(const char *str, size_t max_read_len, size_t *str_len) {
- AWS_ERROR_PRECONDITION(str && str_len, AWS_ERROR_INVALID_ARGUMENT);
-
- /* why not strnlen? It doesn't work everywhere as it wasn't standardized til C11, and is considered
- * a GNU extension. This should be faster anyways. This should work for ascii and utf8.
- * Any other character sets in use deserve what they get. */
- char *null_char_ptr = memchr(str, '\0', max_read_len);
-
- if (null_char_ptr) {
- *str_len = null_char_ptr - str;
- return AWS_OP_SUCCESS;
- }
-
- return aws_raise_error(AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED);
-}
+
+/**
+ * Returns true if bytes of string are the same, false otherwise.
+ */
+bool aws_string_eq(const struct aws_string *a, const struct aws_string *b) {
+ AWS_PRECONDITION(!a || aws_string_is_valid(a));
+ AWS_PRECONDITION(!b || aws_string_is_valid(b));
+ if (a == b) {
+ return true;
+ }
+ if (a == NULL || b == NULL) {
+ return false;
+ }
+ return aws_array_eq(a->bytes, a->len, b->bytes, b->len);
+}
+
+/**
+ * Returns true if bytes of string are equivalent, using a case-insensitive comparison.
+ */
+bool aws_string_eq_ignore_case(const struct aws_string *a, const struct aws_string *b) {
+ AWS_PRECONDITION(!a || aws_string_is_valid(a));
+ AWS_PRECONDITION(!b || aws_string_is_valid(b));
+ if (a == b) {
+ return true;
+ }
+ if (a == NULL || b == NULL) {
+ return false;
+ }
+ return aws_array_eq_ignore_case(a->bytes, a->len, b->bytes, b->len);
+}
+
+/**
+ * Returns true if bytes of string and cursor are the same, false otherwise.
+ */
+bool aws_string_eq_byte_cursor(const struct aws_string *str, const struct aws_byte_cursor *cur) {
+ AWS_PRECONDITION(!str || aws_string_is_valid(str));
+ AWS_PRECONDITION(!cur || aws_byte_cursor_is_valid(cur));
+ if (str == NULL && cur == NULL) {
+ return true;
+ }
+ if (str == NULL || cur == NULL) {
+ return false;
+ }
+ return aws_array_eq(str->bytes, str->len, cur->ptr, cur->len);
+}
+
+/**
+ * Returns true if bytes of string and cursor are equivalent, using a case-insensitive comparison.
+ */
+
+bool aws_string_eq_byte_cursor_ignore_case(const struct aws_string *str, const struct aws_byte_cursor *cur) {
+ AWS_PRECONDITION(!str || aws_string_is_valid(str));
+ AWS_PRECONDITION(!cur || aws_byte_cursor_is_valid(cur));
+ if (str == NULL && cur == NULL) {
+ return true;
+ }
+ if (str == NULL || cur == NULL) {
+ return false;
+ }
+ return aws_array_eq_ignore_case(str->bytes, str->len, cur->ptr, cur->len);
+}
+
+/**
+ * Returns true if bytes of string and buffer are the same, false otherwise.
+ */
+bool aws_string_eq_byte_buf(const struct aws_string *str, const struct aws_byte_buf *buf) {
+ AWS_PRECONDITION(!str || aws_string_is_valid(str));
+ AWS_PRECONDITION(!buf || aws_byte_buf_is_valid(buf));
+ if (str == NULL && buf == NULL) {
+ return true;
+ }
+ if (str == NULL || buf == NULL) {
+ return false;
+ }
+ return aws_array_eq(str->bytes, str->len, buf->buffer, buf->len);
+}
+
+/**
+ * Returns true if bytes of string and buffer are equivalent, using a case-insensitive comparison.
+ */
+
+bool aws_string_eq_byte_buf_ignore_case(const struct aws_string *str, const struct aws_byte_buf *buf) {
+ AWS_PRECONDITION(!str || aws_string_is_valid(str));
+ AWS_PRECONDITION(!buf || aws_byte_buf_is_valid(buf));
+ if (str == NULL && buf == NULL) {
+ return true;
+ }
+ if (str == NULL || buf == NULL) {
+ return false;
+ }
+ return aws_array_eq_ignore_case(str->bytes, str->len, buf->buffer, buf->len);
+}
+
+bool aws_string_eq_c_str(const struct aws_string *str, const char *c_str) {
+ AWS_PRECONDITION(!str || aws_string_is_valid(str));
+ if (str == NULL && c_str == NULL) {
+ return true;
+ }
+ if (str == NULL || c_str == NULL) {
+ return false;
+ }
+ return aws_array_eq_c_str(str->bytes, str->len, c_str);
+}
+
+/**
+ * Returns true if bytes of strings are equivalent, using a case-insensitive comparison.
+ */
+bool aws_string_eq_c_str_ignore_case(const struct aws_string *str, const char *c_str) {
+ AWS_PRECONDITION(!str || aws_string_is_valid(str));
+ if (str == NULL && c_str == NULL) {
+ return true;
+ }
+ if (str == NULL || c_str == NULL) {
+ return false;
+ }
+ return aws_array_eq_c_str_ignore_case(str->bytes, str->len, c_str);
+}
+
+bool aws_byte_buf_write_from_whole_string(
+ struct aws_byte_buf *AWS_RESTRICT buf,
+ const struct aws_string *AWS_RESTRICT src) {
+ AWS_PRECONDITION(!buf || aws_byte_buf_is_valid(buf));
+ AWS_PRECONDITION(!src || aws_string_is_valid(src));
+ if (buf == NULL || src == NULL) {
+ return false;
+ }
+ return aws_byte_buf_write(buf, aws_string_bytes(src), src->len);
+}
+
+/**
+ * Creates an aws_byte_cursor from an existing string.
+ */
+struct aws_byte_cursor aws_byte_cursor_from_string(const struct aws_string *src) {
+ AWS_PRECONDITION(aws_string_is_valid(src));
+ return aws_byte_cursor_from_array(aws_string_bytes(src), src->len);
+}
+
+struct aws_string *aws_string_clone_or_reuse(struct aws_allocator *allocator, const struct aws_string *str) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(aws_string_is_valid(str));
+
+ if (str->allocator == NULL) {
+ /* Since the string cannot be deallocated, we assume that it will remain valid for the lifetime of the
+ * application */
+ AWS_POSTCONDITION(aws_string_is_valid(str));
+ return (struct aws_string *)str;
+ }
+
+ AWS_POSTCONDITION(aws_string_is_valid(str));
+ return aws_string_new_from_string(allocator, str);
+}
+
+int aws_secure_strlen(const char *str, size_t max_read_len, size_t *str_len) {
+ AWS_ERROR_PRECONDITION(str && str_len, AWS_ERROR_INVALID_ARGUMENT);
+
+ /* why not strnlen? It doesn't work everywhere as it wasn't standardized til C11, and is considered
+ * a GNU extension. This should be faster anyways. This should work for ascii and utf8.
+ * Any other character sets in use deserve what they get. */
+ char *null_char_ptr = memchr(str, '\0', max_read_len);
+
+ if (null_char_ptr) {
+ *str_len = null_char_ptr - str;
+ return AWS_OP_SUCCESS;
+ }
+
+ return aws_raise_error(AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED);
+}
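
A brief sketch tying the string helpers above together; only main is illustrative, the rest is declared in aws/common/string.h:

#include <aws/common/common.h>
#include <aws/common/string.h>

#include <stdio.h>

int main(void) {
    struct aws_allocator *alloc = aws_default_allocator();

    struct aws_string *a = aws_string_new_from_c_str(alloc, "hello");
    if (!a) {
        return 1;
    }
    struct aws_string *b = aws_string_new_from_string(alloc, a); /* deep copy */

    if (aws_string_eq(a, b)) {
        printf("equal: %s (%zu bytes)\n", (const char *)aws_string_bytes(a), a->len);
    }

    /* A cursor is a non-owning view over the string's bytes. */
    struct aws_byte_cursor view = aws_byte_cursor_from_string(a);
    printf("first byte: %c\n", *view.ptr);

    aws_string_destroy(b);
    aws_string_destroy(a);
    return 0;
}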
diff --git a/contrib/restricted/aws/aws-c-common/source/task_scheduler.c b/contrib/restricted/aws/aws-c-common/source/task_scheduler.c
index 832139d4aa..31ce7af1ab 100644
--- a/contrib/restricted/aws/aws-c-common/source/task_scheduler.c
+++ b/contrib/restricted/aws/aws-c-common/source/task_scheduler.c
@@ -1,48 +1,48 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/task_scheduler.h>
-#include <aws/common/logging.h>
-
-#include <inttypes.h>
-
+#include <aws/common/logging.h>
+
+#include <inttypes.h>
+
static const size_t DEFAULT_QUEUE_SIZE = 7;
-void aws_task_init(struct aws_task *task, aws_task_fn *fn, void *arg, const char *type_tag) {
- AWS_ZERO_STRUCT(*task);
- task->fn = fn;
- task->arg = arg;
- task->type_tag = type_tag;
-}
-
-const char *aws_task_status_to_c_str(enum aws_task_status status) {
- switch (status) {
- case AWS_TASK_STATUS_RUN_READY:
- return "<Running>";
-
- case AWS_TASK_STATUS_CANCELED:
- return "<Canceled>";
-
- default:
- return "<Unknown>";
- }
-}
-
-void aws_task_run(struct aws_task *task, enum aws_task_status status) {
- AWS_ASSERT(task->fn);
- AWS_LOGF_DEBUG(
- AWS_LS_COMMON_TASK_SCHEDULER,
- "id=%p: Running %s task with %s status",
- (void *)task,
- task->type_tag,
- aws_task_status_to_c_str(status));
-
- task->fn(task, task->arg, status);
-}
-
+void aws_task_init(struct aws_task *task, aws_task_fn *fn, void *arg, const char *type_tag) {
+ AWS_ZERO_STRUCT(*task);
+ task->fn = fn;
+ task->arg = arg;
+ task->type_tag = type_tag;
+}
+
+const char *aws_task_status_to_c_str(enum aws_task_status status) {
+ switch (status) {
+ case AWS_TASK_STATUS_RUN_READY:
+ return "<Running>";
+
+ case AWS_TASK_STATUS_CANCELED:
+ return "<Canceled>";
+
+ default:
+ return "<Unknown>";
+ }
+}
+
+void aws_task_run(struct aws_task *task, enum aws_task_status status) {
+ AWS_ASSERT(task->fn);
+ AWS_LOGF_DEBUG(
+ AWS_LS_COMMON_TASK_SCHEDULER,
+ "id=%p: Running %s task with %s status",
+ (void *)task,
+ task->type_tag,
+ aws_task_status_to_c_str(status));
+
+ task->fn(task, task->arg, status);
+}
+
static int s_compare_timestamps(const void *a, const void *b) {
uint64_t a_time = (*(struct aws_task **)a)->timestamp;
uint64_t b_time = (*(struct aws_task **)b)->timestamp;
@@ -54,41 +54,41 @@ static void s_run_all(struct aws_task_scheduler *scheduler, uint64_t current_tim
int aws_task_scheduler_init(struct aws_task_scheduler *scheduler, struct aws_allocator *alloc) {
AWS_ASSERT(alloc);
- AWS_ZERO_STRUCT(*scheduler);
-
- if (aws_priority_queue_init_dynamic(
- &scheduler->timed_queue, alloc, DEFAULT_QUEUE_SIZE, sizeof(struct aws_task *), &s_compare_timestamps)) {
- return AWS_OP_ERR;
- };
-
+ AWS_ZERO_STRUCT(*scheduler);
+
+ if (aws_priority_queue_init_dynamic(
+ &scheduler->timed_queue, alloc, DEFAULT_QUEUE_SIZE, sizeof(struct aws_task *), &s_compare_timestamps)) {
+ return AWS_OP_ERR;
+ };
+
scheduler->alloc = alloc;
aws_linked_list_init(&scheduler->timed_list);
aws_linked_list_init(&scheduler->asap_list);
-
- AWS_POSTCONDITION(aws_task_scheduler_is_valid(scheduler));
- return AWS_OP_SUCCESS;
+
+ AWS_POSTCONDITION(aws_task_scheduler_is_valid(scheduler));
+ return AWS_OP_SUCCESS;
}
void aws_task_scheduler_clean_up(struct aws_task_scheduler *scheduler) {
AWS_ASSERT(scheduler);
- if (aws_task_scheduler_is_valid(scheduler)) {
- /* Execute all remaining tasks as CANCELED.
- * Do this in a loop so that tasks scheduled by other tasks are executed */
- while (aws_task_scheduler_has_tasks(scheduler, NULL)) {
- s_run_all(scheduler, UINT64_MAX, AWS_TASK_STATUS_CANCELED);
- }
+ if (aws_task_scheduler_is_valid(scheduler)) {
+ /* Execute all remaining tasks as CANCELED.
+ * Do this in a loop so that tasks scheduled by other tasks are executed */
+ while (aws_task_scheduler_has_tasks(scheduler, NULL)) {
+ s_run_all(scheduler, UINT64_MAX, AWS_TASK_STATUS_CANCELED);
+ }
}
aws_priority_queue_clean_up(&scheduler->timed_queue);
- AWS_ZERO_STRUCT(*scheduler);
+ AWS_ZERO_STRUCT(*scheduler);
+}
+
+bool aws_task_scheduler_is_valid(const struct aws_task_scheduler *scheduler) {
+ return scheduler && scheduler->alloc && aws_priority_queue_is_valid(&scheduler->timed_queue) &&
+ aws_linked_list_is_valid(&scheduler->asap_list) && aws_linked_list_is_valid(&scheduler->timed_list);
}
-bool aws_task_scheduler_is_valid(const struct aws_task_scheduler *scheduler) {
- return scheduler && scheduler->alloc && aws_priority_queue_is_valid(&scheduler->timed_queue) &&
- aws_linked_list_is_valid(&scheduler->asap_list) && aws_linked_list_is_valid(&scheduler->timed_list);
-}
-
bool aws_task_scheduler_has_tasks(const struct aws_task_scheduler *scheduler, uint64_t *next_task_time) {
AWS_ASSERT(scheduler);
@@ -128,12 +128,12 @@ void aws_task_scheduler_schedule_now(struct aws_task_scheduler *scheduler, struc
AWS_ASSERT(task);
AWS_ASSERT(task->fn);
- AWS_LOGF_DEBUG(
- AWS_LS_COMMON_TASK_SCHEDULER,
- "id=%p: Scheduling %s task for immediate execution",
- (void *)task,
- task->type_tag);
-
+ AWS_LOGF_DEBUG(
+ AWS_LS_COMMON_TASK_SCHEDULER,
+ "id=%p: Scheduling %s task for immediate execution",
+ (void *)task,
+ task->type_tag);
+
task->priority_queue_node.current_index = SIZE_MAX;
aws_linked_list_node_reset(&task->node);
task->timestamp = 0;
@@ -150,13 +150,13 @@ void aws_task_scheduler_schedule_future(
AWS_ASSERT(task);
AWS_ASSERT(task->fn);
- AWS_LOGF_DEBUG(
- AWS_LS_COMMON_TASK_SCHEDULER,
- "id=%p: Scheduling %s task for future execution at time %" PRIu64,
- (void *)task,
- task->type_tag,
- time_to_run);
-
+ AWS_LOGF_DEBUG(
+ AWS_LS_COMMON_TASK_SCHEDULER,
+ "id=%p: Scheduling %s task for future execution at time %" PRIu64,
+ (void *)task,
+ task->type_tag,
+ time_to_run);
+
task->timestamp = time_to_run;
task->priority_queue_node.current_index = SIZE_MAX;
@@ -256,9 +256,9 @@ void aws_task_scheduler_cancel_task(struct aws_task_scheduler *scheduler, struct
} else {
aws_priority_queue_remove(&scheduler->timed_queue, &task, &task->priority_queue_node);
}
-
- /*
- * No need to log cancellation specially; it will get logged during the run call with the canceled status
- */
+
+ /*
+ * No need to log cancellation specially; it will get logged during the run call with the canceled status
+ */
aws_task_run(task, AWS_TASK_STATUS_CANCELED);
}
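
For orientation, the hunks above touch the whole public task-scheduler surface: a task is initialized with aws_task_init, handed to aws_task_scheduler_schedule_now or aws_task_scheduler_schedule_future, and eventually executed by aws_task_run with either the RUN_READY or CANCELED status; aws_task_scheduler_clean_up drains anything still pending as CANCELED. A minimal sketch of that flow, using only the functions visible in this file (the event-loop driver that actually runs due tasks is assumed and not shown):

/* Minimal sketch, not part of the diff: schedule one task and tear the scheduler down. */
#include <aws/common/task_scheduler.h>

static void s_example_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) {
    (void)task;
    (void)arg;
    /* RUN_READY on normal execution; CANCELED on cancel/clean-up. */
    if (status == AWS_TASK_STATUS_RUN_READY) {
        /* do the actual work here */
    }
}

int example_schedule(struct aws_allocator *alloc, uint64_t run_at_timestamp) {
    struct aws_task_scheduler scheduler;
    if (aws_task_scheduler_init(&scheduler, alloc)) {
        return AWS_OP_ERR;
    }

    struct aws_task task;
    aws_task_init(&task, s_example_task_fn, NULL /* arg */, "example_task");

    /* Either immediately ... */
    /* aws_task_scheduler_schedule_now(&scheduler, &task); */
    /* ... or at a future timestamp. */
    aws_task_scheduler_schedule_future(&scheduler, &task, run_at_timestamp);

    /* An event loop would normally poll aws_task_scheduler_has_tasks() and run due tasks here. */

    /* Clean-up runs any remaining tasks with AWS_TASK_STATUS_CANCELED. */
    aws_task_scheduler_clean_up(&scheduler);
    return AWS_OP_SUCCESS;
}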
diff --git a/contrib/restricted/aws/aws-c-common/source/uuid.c b/contrib/restricted/aws/aws-c-common/source/uuid.c
index b595c6c168..a962abd653 100644
--- a/contrib/restricted/aws/aws-c-common/source/uuid.c
+++ b/contrib/restricted/aws/aws-c-common/source/uuid.c
@@ -1,6 +1,6 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/uuid.h>
@@ -34,7 +34,7 @@ int aws_uuid_init(struct aws_uuid *uuid) {
}
int aws_uuid_init_from_str(struct aws_uuid *uuid, const struct aws_byte_cursor *uuid_str) {
- AWS_ERROR_PRECONDITION(uuid_str->len >= AWS_UUID_STR_LEN - 1, AWS_ERROR_INVALID_BUFFER_SIZE);
+ AWS_ERROR_PRECONDITION(uuid_str->len >= AWS_UUID_STR_LEN - 1, AWS_ERROR_INVALID_BUFFER_SIZE);
char cpy[AWS_UUID_STR_LEN] = {0};
memcpy(cpy, uuid_str->ptr, AWS_UUID_STR_LEN - 1);
@@ -67,7 +67,7 @@ int aws_uuid_init_from_str(struct aws_uuid *uuid, const struct aws_byte_cursor *
}
int aws_uuid_to_str(const struct aws_uuid *uuid, struct aws_byte_buf *output) {
- AWS_ERROR_PRECONDITION(output->capacity - output->len >= AWS_UUID_STR_LEN, AWS_ERROR_SHORT_BUFFER);
+ AWS_ERROR_PRECONDITION(output->capacity - output->len >= AWS_UUID_STR_LEN, AWS_ERROR_SHORT_BUFFER);
sprintf(
(char *)(output->buffer + output->len),
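
The two preconditions restored above guard the string conversions: aws_uuid_init_from_str needs at least AWS_UUID_STR_LEN - 1 bytes of input, and aws_uuid_to_str needs AWS_UUID_STR_LEN bytes of free space in the output buffer. A minimal round-trip sketch under those constraints; the byte_buf/byte_cursor helpers are assumed to come from aws/common/byte_buf.h:

/* Minimal sketch, not part of the diff: generate a UUID, format it, and parse it back. */
#include <aws/common/byte_buf.h>
#include <aws/common/uuid.h>

int example_uuid_roundtrip(void) {
    struct aws_uuid uuid;
    if (aws_uuid_init(&uuid)) {
        return AWS_OP_ERR;
    }

    /* AWS_UUID_STR_LEN bytes of capacity satisfies the aws_uuid_to_str precondition. */
    uint8_t storage[AWS_UUID_STR_LEN] = {0};
    struct aws_byte_buf str_buf = aws_byte_buf_from_empty_array(storage, sizeof(storage));
    if (aws_uuid_to_str(&uuid, &str_buf)) {
        return AWS_OP_ERR; /* would raise AWS_ERROR_SHORT_BUFFER on a too-small buffer */
    }

    /* The formatted string (AWS_UUID_STR_LEN - 1 chars) satisfies aws_uuid_init_from_str. */
    struct aws_byte_cursor str_cur = aws_byte_cursor_from_buf(&str_buf);
    struct aws_uuid parsed;
    return aws_uuid_init_from_str(&parsed, &str_cur);
}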
diff --git a/contrib/restricted/aws/aws-c-common/source/xml_parser.c b/contrib/restricted/aws/aws-c-common/source/xml_parser.c
index 7fa4da3461..692324ac9a 100644
--- a/contrib/restricted/aws/aws-c-common/source/xml_parser.c
+++ b/contrib/restricted/aws/aws-c-common/source/xml_parser.c
@@ -1,455 +1,455 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/array_list.h>
-#include <aws/common/logging.h>
-#include <aws/common/private/xml_parser_impl.h>
-
-#ifdef _MSC_VER
-/* allow non-constant declared initializers. */
-# pragma warning(disable : 4204)
-#endif
-
-static const size_t s_max_document_depth = 20;
-#define MAX_NAME_LEN ((size_t)256)
-#define NODE_CLOSE_OVERHEAD ((size_t)3)
-
-struct cb_stack_data {
- aws_xml_parser_on_node_encountered_fn *cb;
- void *user_data;
-};
-
-struct aws_xml_parser *aws_xml_parser_new(
- struct aws_allocator *allocator,
- const struct aws_xml_parser_options *options) {
-
- AWS_PRECONDITION(allocator);
- AWS_PRECONDITION(options);
-
- struct aws_xml_parser *parser = aws_mem_calloc(allocator, 1, sizeof(struct aws_xml_parser));
-
- if (parser == NULL) {
- return NULL;
- }
-
- parser->allocator = allocator;
- parser->doc = options->doc;
-
- parser->max_depth = s_max_document_depth;
- parser->error = AWS_OP_SUCCESS;
-
- if (options->max_depth) {
- parser->max_depth = options->max_depth;
- }
-
- if (aws_array_list_init_dynamic(&parser->callback_stack, allocator, 4, sizeof(struct cb_stack_data))) {
- aws_mem_release(allocator, parser);
- return NULL;
- }
-
- return parser;
-}
-
-void aws_xml_parser_destroy(struct aws_xml_parser *parser) {
- AWS_PRECONDITION(parser);
-
- aws_array_list_clean_up(&parser->callback_stack);
-
- aws_mem_release(parser->allocator, parser);
-}
-
-int s_node_next_sibling(struct aws_xml_parser *parser);
-
-static bool s_double_quote_fn(uint8_t value) {
- return value == '"';
-}
-
-/* load the node declaration line, parsing node name and attributes.
- *
- * something of the form:
- * <NodeName Attribute1=Value1 Attribute2=Value2 ...>
- * */
-static int s_load_node_decl(
- struct aws_xml_parser *parser,
- struct aws_byte_cursor *decl_body,
- struct aws_xml_node *node) {
- AWS_PRECONDITION(parser);
- AWS_PRECONDITION(decl_body);
- AWS_PRECONDITION(node);
-
- struct aws_array_list splits;
- AWS_ZERO_STRUCT(splits);
-
- AWS_ZERO_ARRAY(parser->split_scratch);
- aws_array_list_init_static(
- &splits, parser->split_scratch, AWS_ARRAY_SIZE(parser->split_scratch), sizeof(struct aws_byte_cursor));
-
- /* split by space, first split will be the node name, everything after will be attribute=value pairs. For now
- * we limit to 10 attributes, if this is exceeded we consider it invalid document. */
- if (aws_byte_cursor_split_on_char(decl_body, ' ', &splits)) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
-
- size_t splits_count = aws_array_list_length(&splits);
-
- if (splits_count < 1) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
-
- aws_array_list_get_at(&splits, &node->name, 0);
-
- AWS_ZERO_ARRAY(parser->attributes);
- if (splits.length > 1) {
- aws_array_list_init_static(
- &node->attributes,
- parser->attributes,
- AWS_ARRAY_SIZE(parser->attributes),
- sizeof(struct aws_xml_attribute));
-
- for (size_t i = 1; i < splits.length; ++i) {
- struct aws_byte_cursor attribute_pair;
- AWS_ZERO_STRUCT(attribute_pair);
- aws_array_list_get_at(&splits, &attribute_pair, i);
-
- struct aws_byte_cursor att_val_pair[2];
- AWS_ZERO_ARRAY(att_val_pair);
- struct aws_array_list att_val_pair_lst;
- AWS_ZERO_STRUCT(att_val_pair_lst);
- aws_array_list_init_static(&att_val_pair_lst, att_val_pair, 2, sizeof(struct aws_byte_cursor));
-
- if (!aws_byte_cursor_split_on_char(&attribute_pair, '=', &att_val_pair_lst)) {
- struct aws_xml_attribute attribute = {
- .name = att_val_pair[0],
- .value = aws_byte_cursor_trim_pred(&att_val_pair[1], s_double_quote_fn),
- };
- aws_array_list_push_back(&node->attributes, &attribute);
- }
- }
- }
-
- return AWS_OP_SUCCESS;
-}
-
-int aws_xml_parser_parse(
- struct aws_xml_parser *parser,
- aws_xml_parser_on_node_encountered_fn *on_node_encountered,
- void *user_data) {
-
- AWS_PRECONDITION(parser);
-
- if (on_node_encountered == NULL) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "'on_node_encountered' argument for aws_xml_parser_parse is invalid.");
- aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- return AWS_OP_ERR;
- }
-
- aws_array_list_clear(&parser->callback_stack);
-
- /* burn everything that precedes the actual xml nodes. */
- while (parser->doc.len) {
- uint8_t *start = memchr(parser->doc.ptr, '<', parser->doc.len);
- if (!start) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
-
- uint8_t *location = memchr(parser->doc.ptr, '>', parser->doc.len);
-
- if (!location) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
-
- aws_byte_cursor_advance(&parser->doc, start - parser->doc.ptr);
- /* if these are preamble statements, burn them. otherwise don't seek at all
- * and assume it's just the doc with no preamble statements. */
- if (*(parser->doc.ptr + 1) == '?' || *(parser->doc.ptr + 1) == '!') {
- /* nobody cares about the preamble */
- size_t advance = location - parser->doc.ptr + 1;
- aws_byte_cursor_advance(&parser->doc, advance);
- } else {
- break;
- }
- }
-
- /* now we should be at the start of the actual document. */
- struct cb_stack_data stack_data = {
- .cb = on_node_encountered,
- .user_data = user_data,
- };
-
- AWS_FATAL_ASSERT(!aws_array_list_push_back(&parser->callback_stack, &stack_data));
- return s_node_next_sibling(parser);
-}
-
-int s_advance_to_closing_tag(
- struct aws_xml_parser *parser,
- struct aws_xml_node *node,
- struct aws_byte_cursor *out_body) {
- AWS_PRECONDITION(parser);
- AWS_PRECONDITION(node);
-
- /* currently the max node name is 256 characters. This is arbitrary, but should be enough
- * for our uses. If we ever generalize this, we'll have to come back and rethink this. */
- uint8_t name_close[MAX_NAME_LEN + NODE_CLOSE_OVERHEAD] = {0};
- uint8_t name_open[MAX_NAME_LEN + NODE_CLOSE_OVERHEAD] = {0};
-
- struct aws_byte_buf closing_cmp_buf = aws_byte_buf_from_empty_array(name_close, sizeof(name_close));
- struct aws_byte_buf open_cmp_buf = aws_byte_buf_from_empty_array(name_open, sizeof(name_open));
-
- size_t closing_name_len = node->name.len + NODE_CLOSE_OVERHEAD;
-
- if (closing_name_len > node->doc_at_body.len) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
- parser->error = aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- return AWS_OP_ERR;
- }
-
- if (sizeof(name_close) < closing_name_len) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
- parser->error = aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- return AWS_OP_ERR;
- }
-
- struct aws_byte_cursor open_bracket = aws_byte_cursor_from_c_str("<");
- struct aws_byte_cursor close_token = aws_byte_cursor_from_c_str("/");
- struct aws_byte_cursor close_bracket = aws_byte_cursor_from_c_str(">");
-
- aws_byte_buf_append(&open_cmp_buf, &open_bracket);
- aws_byte_buf_append(&open_cmp_buf, &node->name);
-
- aws_byte_buf_append(&closing_cmp_buf, &open_bracket);
- aws_byte_buf_append(&closing_cmp_buf, &close_token);
- aws_byte_buf_append(&closing_cmp_buf, &node->name);
- aws_byte_buf_append(&closing_cmp_buf, &close_bracket);
-
- size_t depth_count = 1;
- struct aws_byte_cursor to_find_open = aws_byte_cursor_from_buf(&open_cmp_buf);
- struct aws_byte_cursor to_find_close = aws_byte_cursor_from_buf(&closing_cmp_buf);
- struct aws_byte_cursor close_find_result;
- AWS_ZERO_STRUCT(close_find_result);
- do {
- if (aws_byte_cursor_find_exact(&parser->doc, &to_find_close, &close_find_result)) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
-
- /* if we find an opening node with the same name, before the closing tag keep going. */
- struct aws_byte_cursor open_find_result;
- AWS_ZERO_STRUCT(open_find_result);
-
- while (parser->doc.len) {
- if (!aws_byte_cursor_find_exact(&parser->doc, &to_find_open, &open_find_result)) {
- if (open_find_result.ptr < close_find_result.ptr) {
- size_t skip_len = open_find_result.ptr - parser->doc.ptr;
- aws_byte_cursor_advance(&parser->doc, skip_len + 1);
- depth_count++;
- continue;
- }
- }
- size_t skip_len = close_find_result.ptr - parser->doc.ptr;
- aws_byte_cursor_advance(&parser->doc, skip_len + closing_cmp_buf.len);
- depth_count--;
- break;
- }
- } while (depth_count > 0);
-
- size_t len = close_find_result.ptr - node->doc_at_body.ptr;
-
- if (out_body) {
- *out_body = aws_byte_cursor_from_array(node->doc_at_body.ptr, len);
- }
-
- return parser->error;
-}
-
-int aws_xml_node_as_body(struct aws_xml_parser *parser, struct aws_xml_node *node, struct aws_byte_cursor *out_body) {
- AWS_PRECONDITION(parser);
- AWS_PRECONDITION(node);
-
- node->processed = true;
- return s_advance_to_closing_tag(parser, node, out_body);
-}
-
-int aws_xml_node_traverse(
- struct aws_xml_parser *parser,
- struct aws_xml_node *node,
- aws_xml_parser_on_node_encountered_fn *on_node_encountered,
- void *user_data) {
- AWS_PRECONDITION(parser);
- AWS_PRECONDITION(node);
-
- if (on_node_encountered == NULL) {
- AWS_LOGF_ERROR(
- AWS_LS_COMMON_XML_PARSER, "Callback 'on_node_encountered' for aws_xml_node_traverse is invalid.");
- aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- return AWS_OP_ERR;
- }
-
- node->processed = true;
- struct cb_stack_data stack_data = {
- .cb = on_node_encountered,
- .user_data = user_data,
- };
-
- size_t doc_depth = aws_array_list_length(&parser->callback_stack);
- if (doc_depth >= parser->max_depth) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
- parser->error = aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- return AWS_OP_ERR;
- }
-
- if (aws_array_list_push_back(&parser->callback_stack, &stack_data)) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
- parser->error = aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- return AWS_OP_ERR;
- }
-
- /* look for the next node at the current level. do this until we encounter the parent node's
- * closing tag. */
- while (!parser->stop_parsing && !parser->error) {
- uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len);
-
- if (!next_location) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
-
- uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len);
-
- if (!end_location) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
-
- bool parent_closed = false;
-
- if (*(next_location + 1) == '/') {
- parent_closed = true;
- }
-
- size_t node_name_len = end_location - next_location;
-
- aws_byte_cursor_advance(&parser->doc, end_location - parser->doc.ptr + 1);
-
- if (parent_closed) {
- break;
- }
-
- struct aws_byte_cursor decl_body = aws_byte_cursor_from_array(next_location + 1, node_name_len - 1);
-
- struct aws_xml_node next_node = {
- .doc_at_body = parser->doc,
- .processed = false,
- };
-
- if (s_load_node_decl(parser, &decl_body, &next_node)) {
- return AWS_OP_ERR;
- }
-
- if (!on_node_encountered(parser, &next_node, user_data)) {
- parser->stop_parsing = true;
- return parser->error;
- }
-
- /* if the user simply returned while skipping the node altogether, go ahead and do the skip over. */
- if (!parser->stop_parsing && !next_node.processed) {
- if (s_advance_to_closing_tag(parser, &next_node, NULL)) {
- return AWS_OP_ERR;
- }
- }
- }
-
- if (parser->stop_parsing) {
- return parser->error;
- }
-
- aws_array_list_pop_back(&parser->callback_stack);
- return parser->error;
-}
-
-int aws_xml_node_get_name(const struct aws_xml_node *node, struct aws_byte_cursor *out_name) {
- AWS_PRECONDITION(node);
-
- if (out_name == NULL) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "'out_name' argument for aws_xml_node_get_name is invalid.");
- aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- return AWS_OP_ERR;
- }
-
- *out_name = node->name;
- return AWS_OP_SUCCESS;
-}
-
-size_t aws_xml_node_get_num_attributes(const struct aws_xml_node *node) {
- AWS_PRECONDITION(node);
- return aws_array_list_length(&node->attributes);
-}
-
-int aws_xml_node_get_attribute(
- const struct aws_xml_node *node,
- size_t attribute_index,
- struct aws_xml_attribute *out_attribute) {
- AWS_PRECONDITION(node);
-
- if (out_attribute == NULL) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "'out_attribute' argument for aws_xml_node_get_attribute is invalid.");
- aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- return AWS_OP_ERR;
- }
-
- return aws_array_list_get_at(&node->attributes, out_attribute, attribute_index);
-}
-
-/* advance the parser to the next sibling node.*/
-int s_node_next_sibling(struct aws_xml_parser *parser) {
- AWS_PRECONDITION(parser);
-
- uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len);
-
- if (!next_location) {
- return parser->error;
- }
-
- aws_byte_cursor_advance(&parser->doc, next_location - parser->doc.ptr);
- uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len);
-
- if (!end_location) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
- return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
- }
-
- size_t node_name_len = end_location - next_location;
- aws_byte_cursor_advance(&parser->doc, end_location - parser->doc.ptr + 1);
-
- struct aws_byte_cursor node_decl_body = aws_byte_cursor_from_array(next_location + 1, node_name_len - 1);
-
- struct aws_xml_node sibling_node = {
- .doc_at_body = parser->doc,
- .processed = false,
- };
-
- if (s_load_node_decl(parser, &node_decl_body, &sibling_node)) {
- return AWS_OP_ERR;
- }
-
- struct cb_stack_data stack_data;
- AWS_ZERO_STRUCT(stack_data);
- aws_array_list_back(&parser->callback_stack, &stack_data);
- AWS_FATAL_ASSERT(stack_data.cb);
-
- parser->stop_parsing = !stack_data.cb(parser, &sibling_node, stack_data.user_data);
-
- /* if the user simply returned while skipping the node altogether, go ahead and do the skip over. */
- if (!sibling_node.processed) {
- if (s_advance_to_closing_tag(parser, &sibling_node, NULL)) {
- return AWS_OP_ERR;
- }
- }
-
- return parser->error;
-}
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/array_list.h>
+#include <aws/common/logging.h>
+#include <aws/common/private/xml_parser_impl.h>
+
+#ifdef _MSC_VER
+/* allow non-constant declared initializers. */
+# pragma warning(disable : 4204)
+#endif
+
+static const size_t s_max_document_depth = 20;
+#define MAX_NAME_LEN ((size_t)256)
+#define NODE_CLOSE_OVERHEAD ((size_t)3)
+
+struct cb_stack_data {
+ aws_xml_parser_on_node_encountered_fn *cb;
+ void *user_data;
+};
+
+struct aws_xml_parser *aws_xml_parser_new(
+ struct aws_allocator *allocator,
+ const struct aws_xml_parser_options *options) {
+
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(options);
+
+ struct aws_xml_parser *parser = aws_mem_calloc(allocator, 1, sizeof(struct aws_xml_parser));
+
+ if (parser == NULL) {
+ return NULL;
+ }
+
+ parser->allocator = allocator;
+ parser->doc = options->doc;
+
+ parser->max_depth = s_max_document_depth;
+ parser->error = AWS_OP_SUCCESS;
+
+ if (options->max_depth) {
+ parser->max_depth = options->max_depth;
+ }
+
+ if (aws_array_list_init_dynamic(&parser->callback_stack, allocator, 4, sizeof(struct cb_stack_data))) {
+ aws_mem_release(allocator, parser);
+ return NULL;
+ }
+
+ return parser;
+}
+
+void aws_xml_parser_destroy(struct aws_xml_parser *parser) {
+ AWS_PRECONDITION(parser);
+
+ aws_array_list_clean_up(&parser->callback_stack);
+
+ aws_mem_release(parser->allocator, parser);
+}
+
+int s_node_next_sibling(struct aws_xml_parser *parser);
+
+static bool s_double_quote_fn(uint8_t value) {
+ return value == '"';
+}
+
+/* load the node declaration line, parsing node name and attributes.
+ *
+ * something of the form:
+ * <NodeName Attribute1=Value1 Attribute2=Value2 ...>
+ * */
+static int s_load_node_decl(
+ struct aws_xml_parser *parser,
+ struct aws_byte_cursor *decl_body,
+ struct aws_xml_node *node) {
+ AWS_PRECONDITION(parser);
+ AWS_PRECONDITION(decl_body);
+ AWS_PRECONDITION(node);
+
+ struct aws_array_list splits;
+ AWS_ZERO_STRUCT(splits);
+
+ AWS_ZERO_ARRAY(parser->split_scratch);
+ aws_array_list_init_static(
+ &splits, parser->split_scratch, AWS_ARRAY_SIZE(parser->split_scratch), sizeof(struct aws_byte_cursor));
+
+ /* split by space, first split will be the node name, everything after will be attribute=value pairs. For now
+ * we limit to 10 attributes, if this is exceeded we consider it invalid document. */
+ if (aws_byte_cursor_split_on_char(decl_body, ' ', &splits)) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+
+ size_t splits_count = aws_array_list_length(&splits);
+
+ if (splits_count < 1) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+
+ aws_array_list_get_at(&splits, &node->name, 0);
+
+ AWS_ZERO_ARRAY(parser->attributes);
+ if (splits.length > 1) {
+ aws_array_list_init_static(
+ &node->attributes,
+ parser->attributes,
+ AWS_ARRAY_SIZE(parser->attributes),
+ sizeof(struct aws_xml_attribute));
+
+ for (size_t i = 1; i < splits.length; ++i) {
+ struct aws_byte_cursor attribute_pair;
+ AWS_ZERO_STRUCT(attribute_pair);
+ aws_array_list_get_at(&splits, &attribute_pair, i);
+
+ struct aws_byte_cursor att_val_pair[2];
+ AWS_ZERO_ARRAY(att_val_pair);
+ struct aws_array_list att_val_pair_lst;
+ AWS_ZERO_STRUCT(att_val_pair_lst);
+ aws_array_list_init_static(&att_val_pair_lst, att_val_pair, 2, sizeof(struct aws_byte_cursor));
+
+ if (!aws_byte_cursor_split_on_char(&attribute_pair, '=', &att_val_pair_lst)) {
+ struct aws_xml_attribute attribute = {
+ .name = att_val_pair[0],
+ .value = aws_byte_cursor_trim_pred(&att_val_pair[1], s_double_quote_fn),
+ };
+ aws_array_list_push_back(&node->attributes, &attribute);
+ }
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_xml_parser_parse(
+ struct aws_xml_parser *parser,
+ aws_xml_parser_on_node_encountered_fn *on_node_encountered,
+ void *user_data) {
+
+ AWS_PRECONDITION(parser);
+
+ if (on_node_encountered == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "'on_node_encountered' argument for aws_xml_parser_parse is invalid.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return AWS_OP_ERR;
+ }
+
+ aws_array_list_clear(&parser->callback_stack);
+
+ /* burn everything that precedes the actual xml nodes. */
+ while (parser->doc.len) {
+ uint8_t *start = memchr(parser->doc.ptr, '<', parser->doc.len);
+ if (!start) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+
+ uint8_t *location = memchr(parser->doc.ptr, '>', parser->doc.len);
+
+ if (!location) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+
+ aws_byte_cursor_advance(&parser->doc, start - parser->doc.ptr);
+ /* if these are preamble statements, burn them. otherwise don't seek at all
+ * and assume it's just the doc with no preamble statements. */
+ if (*(parser->doc.ptr + 1) == '?' || *(parser->doc.ptr + 1) == '!') {
+ /* nobody cares about the preamble */
+ size_t advance = location - parser->doc.ptr + 1;
+ aws_byte_cursor_advance(&parser->doc, advance);
+ } else {
+ break;
+ }
+ }
+
+ /* now we should be at the start of the actual document. */
+ struct cb_stack_data stack_data = {
+ .cb = on_node_encountered,
+ .user_data = user_data,
+ };
+
+ AWS_FATAL_ASSERT(!aws_array_list_push_back(&parser->callback_stack, &stack_data));
+ return s_node_next_sibling(parser);
+}
+
+int s_advance_to_closing_tag(
+ struct aws_xml_parser *parser,
+ struct aws_xml_node *node,
+ struct aws_byte_cursor *out_body) {
+ AWS_PRECONDITION(parser);
+ AWS_PRECONDITION(node);
+
+ /* currently the max node name is 256 characters. This is arbitrary, but should be enough
+ * for our uses. If we ever generalize this, we'll have to come back and rethink this. */
+ uint8_t name_close[MAX_NAME_LEN + NODE_CLOSE_OVERHEAD] = {0};
+ uint8_t name_open[MAX_NAME_LEN + NODE_CLOSE_OVERHEAD] = {0};
+
+ struct aws_byte_buf closing_cmp_buf = aws_byte_buf_from_empty_array(name_close, sizeof(name_close));
+ struct aws_byte_buf open_cmp_buf = aws_byte_buf_from_empty_array(name_open, sizeof(name_open));
+
+ size_t closing_name_len = node->name.len + NODE_CLOSE_OVERHEAD;
+
+ if (closing_name_len > node->doc_at_body.len) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
+ parser->error = aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ return AWS_OP_ERR;
+ }
+
+ if (sizeof(name_close) < closing_name_len) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
+ parser->error = aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor open_bracket = aws_byte_cursor_from_c_str("<");
+ struct aws_byte_cursor close_token = aws_byte_cursor_from_c_str("/");
+ struct aws_byte_cursor close_bracket = aws_byte_cursor_from_c_str(">");
+
+ aws_byte_buf_append(&open_cmp_buf, &open_bracket);
+ aws_byte_buf_append(&open_cmp_buf, &node->name);
+
+ aws_byte_buf_append(&closing_cmp_buf, &open_bracket);
+ aws_byte_buf_append(&closing_cmp_buf, &close_token);
+ aws_byte_buf_append(&closing_cmp_buf, &node->name);
+ aws_byte_buf_append(&closing_cmp_buf, &close_bracket);
+
+ size_t depth_count = 1;
+ struct aws_byte_cursor to_find_open = aws_byte_cursor_from_buf(&open_cmp_buf);
+ struct aws_byte_cursor to_find_close = aws_byte_cursor_from_buf(&closing_cmp_buf);
+ struct aws_byte_cursor close_find_result;
+ AWS_ZERO_STRUCT(close_find_result);
+ do {
+ if (aws_byte_cursor_find_exact(&parser->doc, &to_find_close, &close_find_result)) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+
+ /* if we find an opening node with the same name, before the closing tag keep going. */
+ struct aws_byte_cursor open_find_result;
+ AWS_ZERO_STRUCT(open_find_result);
+
+ while (parser->doc.len) {
+ if (!aws_byte_cursor_find_exact(&parser->doc, &to_find_open, &open_find_result)) {
+ if (open_find_result.ptr < close_find_result.ptr) {
+ size_t skip_len = open_find_result.ptr - parser->doc.ptr;
+ aws_byte_cursor_advance(&parser->doc, skip_len + 1);
+ depth_count++;
+ continue;
+ }
+ }
+ size_t skip_len = close_find_result.ptr - parser->doc.ptr;
+ aws_byte_cursor_advance(&parser->doc, skip_len + closing_cmp_buf.len);
+ depth_count--;
+ break;
+ }
+ } while (depth_count > 0);
+
+ size_t len = close_find_result.ptr - node->doc_at_body.ptr;
+
+ if (out_body) {
+ *out_body = aws_byte_cursor_from_array(node->doc_at_body.ptr, len);
+ }
+
+ return parser->error;
+}
+
+int aws_xml_node_as_body(struct aws_xml_parser *parser, struct aws_xml_node *node, struct aws_byte_cursor *out_body) {
+ AWS_PRECONDITION(parser);
+ AWS_PRECONDITION(node);
+
+ node->processed = true;
+ return s_advance_to_closing_tag(parser, node, out_body);
+}
+
+int aws_xml_node_traverse(
+ struct aws_xml_parser *parser,
+ struct aws_xml_node *node,
+ aws_xml_parser_on_node_encountered_fn *on_node_encountered,
+ void *user_data) {
+ AWS_PRECONDITION(parser);
+ AWS_PRECONDITION(node);
+
+ if (on_node_encountered == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_XML_PARSER, "Callback 'on_node_encountered' for aws_xml_node_traverse is invalid.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return AWS_OP_ERR;
+ }
+
+ node->processed = true;
+ struct cb_stack_data stack_data = {
+ .cb = on_node_encountered,
+ .user_data = user_data,
+ };
+
+ size_t doc_depth = aws_array_list_length(&parser->callback_stack);
+ if (doc_depth >= parser->max_depth) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
+ parser->error = aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ return AWS_OP_ERR;
+ }
+
+ if (aws_array_list_push_back(&parser->callback_stack, &stack_data)) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
+ parser->error = aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ return AWS_OP_ERR;
+ }
+
+ /* look for the next node at the current level. do this until we encounter the parent node's
+ * closing tag. */
+ while (!parser->stop_parsing && !parser->error) {
+ uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len);
+
+ if (!next_location) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+
+ uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len);
+
+ if (!end_location) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+
+ bool parent_closed = false;
+
+ if (*(next_location + 1) == '/') {
+ parent_closed = true;
+ }
+
+ size_t node_name_len = end_location - next_location;
+
+ aws_byte_cursor_advance(&parser->doc, end_location - parser->doc.ptr + 1);
+
+ if (parent_closed) {
+ break;
+ }
+
+ struct aws_byte_cursor decl_body = aws_byte_cursor_from_array(next_location + 1, node_name_len - 1);
+
+ struct aws_xml_node next_node = {
+ .doc_at_body = parser->doc,
+ .processed = false,
+ };
+
+ if (s_load_node_decl(parser, &decl_body, &next_node)) {
+ return AWS_OP_ERR;
+ }
+
+ if (!on_node_encountered(parser, &next_node, user_data)) {
+ parser->stop_parsing = true;
+ return parser->error;
+ }
+
+ /* if the user simply returned while skipping the node altogether, go ahead and do the skip over. */
+ if (!parser->stop_parsing && !next_node.processed) {
+ if (s_advance_to_closing_tag(parser, &next_node, NULL)) {
+ return AWS_OP_ERR;
+ }
+ }
+ }
+
+ if (parser->stop_parsing) {
+ return parser->error;
+ }
+
+ aws_array_list_pop_back(&parser->callback_stack);
+ return parser->error;
+}
+
+int aws_xml_node_get_name(const struct aws_xml_node *node, struct aws_byte_cursor *out_name) {
+ AWS_PRECONDITION(node);
+
+ if (out_name == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "'out_name' argument for aws_xml_node_get_name is invalid.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return AWS_OP_ERR;
+ }
+
+ *out_name = node->name;
+ return AWS_OP_SUCCESS;
+}
+
+size_t aws_xml_node_get_num_attributes(const struct aws_xml_node *node) {
+ AWS_PRECONDITION(node);
+ return aws_array_list_length(&node->attributes);
+}
+
+int aws_xml_node_get_attribute(
+ const struct aws_xml_node *node,
+ size_t attribute_index,
+ struct aws_xml_attribute *out_attribute) {
+ AWS_PRECONDITION(node);
+
+ if (out_attribute == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "'out_attribute' argument for aws_xml_node_get_attribute is invalid.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return AWS_OP_ERR;
+ }
+
+ return aws_array_list_get_at(&node->attributes, out_attribute, attribute_index);
+}
+
+/* advance the parser to the next sibling node.*/
+int s_node_next_sibling(struct aws_xml_parser *parser) {
+ AWS_PRECONDITION(parser);
+
+ uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len);
+
+ if (!next_location) {
+ return parser->error;
+ }
+
+ aws_byte_cursor_advance(&parser->doc, next_location - parser->doc.ptr);
+ uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len);
+
+ if (!end_location) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+
+ size_t node_name_len = end_location - next_location;
+ aws_byte_cursor_advance(&parser->doc, end_location - parser->doc.ptr + 1);
+
+ struct aws_byte_cursor node_decl_body = aws_byte_cursor_from_array(next_location + 1, node_name_len - 1);
+
+ struct aws_xml_node sibling_node = {
+ .doc_at_body = parser->doc,
+ .processed = false,
+ };
+
+ if (s_load_node_decl(parser, &node_decl_body, &sibling_node)) {
+ return AWS_OP_ERR;
+ }
+
+ struct cb_stack_data stack_data;
+ AWS_ZERO_STRUCT(stack_data);
+ aws_array_list_back(&parser->callback_stack, &stack_data);
+ AWS_FATAL_ASSERT(stack_data.cb);
+
+ parser->stop_parsing = !stack_data.cb(parser, &sibling_node, stack_data.user_data);
+
+ /* if the user simply returned while skipping the node altogether, go ahead and do the skip over. */
+ if (!sibling_node.processed) {
+ if (s_advance_to_closing_tag(parser, &sibling_node, NULL)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ return parser->error;
+}
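
Taken together, the restored xml_parser.c implements a depth-limited, callback-driven parser: aws_xml_parser_parse skips any <?...?> or <!...> preamble, s_node_next_sibling and aws_xml_node_traverse invoke the registered aws_xml_parser_on_node_encountered_fn for each node, and s_advance_to_closing_tag skips any node the callback left unprocessed. A minimal usage sketch follows, under the assumptions (inferred, not confirmed by this diff) that the public header is aws/common/xml_parser.h and that the callback returns true to continue parsing:

/* Minimal sketch, not part of the diff: walk the top-level nodes of an XML document. */
#include <aws/common/xml_parser.h> /* assumed public header for the API shown above */

static bool s_on_node(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) {
    (void)user_data;

    struct aws_byte_cursor name;
    if (aws_xml_node_get_name(node, &name) == AWS_OP_SUCCESS) {
        /* aws_xml_node_as_body marks the node processed and yields its inner text. */
        struct aws_byte_cursor body;
        aws_xml_node_as_body(parser, node, &body);
    }

    /* Returning false would set stop_parsing and end the traversal early (assumed bool return). */
    return true;
}

int example_parse(struct aws_allocator *alloc, struct aws_byte_cursor xml_doc) {
    struct aws_xml_parser_options options = {
        .doc = xml_doc,
        /* .max_depth left at 0 falls back to s_max_document_depth (20). */
    };

    struct aws_xml_parser *parser = aws_xml_parser_new(alloc, &options);
    if (parser == NULL) {
        return AWS_OP_ERR;
    }

    int result = aws_xml_parser_parse(parser, s_on_node, NULL /* user_data */);
    aws_xml_parser_destroy(parser);
    return result;
}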