author    | dakovalkov <[email protected]> | 2023-12-03 13:33:55 +0300
committer | dakovalkov <[email protected]> | 2023-12-03 14:04:39 +0300
commit    | 2a718325637e5302334b6d0a6430f63168f8dbb3 (patch)
tree      | 64be81080b7df9ec1d86d053a0c394ae53fcf1fe /contrib/restricted/aws/aws-c-common/source
parent    | e0d94a470142d95c3007e9c5d80380994940664a (diff)
Update contrib/libs/aws-sdk-cpp to 1.11.37
Diffstat (limited to 'contrib/restricted/aws/aws-c-common/source')
22 files changed, 338 insertions, 939 deletions
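Among the changes below, encoding.c replaces the aws_utf8_validator API with a callback-driven aws_utf8_decoder plus a one-shot aws_decode_utf8() helper. The following is a minimal caller sketch, assuming only the declarations visible in that hunk; the header choice, the callback body, and main() are illustrative and not part of this commit.

```c
/* Minimal sketch: feeding a buffer through the callback-based UTF-8 decoder
 * added in encoding.c below. Only aws_utf8_decoder_options, on_codepoint, and
 * aws_decode_utf8() come from this diff; everything else here is an assumed
 * example (header paths, callback body, main()). */
#include <aws/common/byte_buf.h>
#include <aws/common/encoding.h>

#include <inttypes.h>
#include <stdio.h>

static int s_print_codepoint(uint32_t codepoint, void *user_data) {
    (void)user_data;
    printf("U+%04" PRIX32 "\n", codepoint);
    return AWS_OP_SUCCESS; /* a non-zero return makes aws_decode_utf8() stop and return AWS_OP_ERR */
}

int main(void) {
    struct aws_byte_cursor text = aws_byte_cursor_from_c_str("h\xC3\xA9llo"); /* "héllo" */

    struct aws_utf8_decoder_options options = {
        .on_codepoint = s_print_codepoint,
        .user_data = NULL,
    };

    /* Per the diff, malformed or truncated input raises AWS_ERROR_INVALID_UTF8
     * and returns AWS_OP_ERR. */
    return aws_decode_utf8(text, &options) == AWS_OP_SUCCESS ? 0 : 1;
}
```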
diff --git a/contrib/restricted/aws/aws-c-common/source/allocator.c b/contrib/restricted/aws/aws-c-common/source/allocator.c index a6726624702..67e8695996a 100644 --- a/contrib/restricted/aws/aws-c-common/source/allocator.c +++ b/contrib/restricted/aws/aws-c-common/source/allocator.c @@ -37,7 +37,7 @@ bool aws_allocator_is_valid(const struct aws_allocator *alloc) { static void *s_default_malloc(struct aws_allocator *allocator, size_t size) { (void)allocator; /* larger allocations should be aligned so that AVX and friends can avoid - * the extra preable during unaligned versions of memcpy/memset on big buffers + * the extra preamble during unaligned versions of memcpy/memset on big buffers * This will also accelerate hardware CRC and SHA on ARM chips * * 64 byte alignment for > page allocations on 64 bit systems diff --git a/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c b/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c index ffc6e0d4c93..98c51b88d18 100644 --- a/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c +++ b/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c @@ -85,11 +85,26 @@ static bool s_has_avx2(void) { return true; } +static bool s_has_bmi2(void) { + uint32_t abcd[4]; + + /* Check BMI2: + * CPUID.(EAX=07H, ECX=0H):EBX.BMI2[bit 8]==1 */ + uint32_t bmi2_mask = (1 << 8); + aws_run_cpuid(7, 0, abcd); + if ((abcd[1] & bmi2_mask) != bmi2_mask) { + return false; + } + + return true; +} + has_feature_fn *s_check_cpu_feature[AWS_CPU_FEATURE_COUNT] = { [AWS_CPU_FEATURE_CLMUL] = s_has_clmul, [AWS_CPU_FEATURE_SSE_4_1] = s_has_sse41, [AWS_CPU_FEATURE_SSE_4_2] = s_has_sse42, [AWS_CPU_FEATURE_AVX2] = s_has_avx2, + [AWS_CPU_FEATURE_BMI2] = s_has_bmi2, }; bool aws_cpu_has_feature(enum aws_cpu_feature_name feature_name) { diff --git a/contrib/restricted/aws/aws-c-common/source/array_list.c b/contrib/restricted/aws/aws-c-common/source/array_list.c index 7e05636a750..45c8a3cc76f 100644 --- a/contrib/restricted/aws/aws-c-common/source/array_list.c +++ b/contrib/restricted/aws/aws-c-common/source/array_list.c @@ -10,7 +10,7 @@ int aws_array_list_calc_necessary_size(struct aws_array_list *AWS_RESTRICT list, size_t index, size_t *necessary_size) { AWS_PRECONDITION(aws_array_list_is_valid(list)); - size_t index_inc; + size_t index_inc = 0; if (aws_add_size_checked(index, 1, &index_inc)) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_ERR; @@ -199,7 +199,8 @@ void aws_array_list_swap(struct aws_array_list *AWS_RESTRICT list, size_t a, siz return; } - void *item1 = NULL, *item2 = NULL; + void *item1 = NULL; + void *item2 = NULL; aws_array_list_get_at_ptr(list, &item1, a); aws_array_list_get_at_ptr(list, &item2, b); aws_array_list_mem_swap(item1, item2, list->item_size); diff --git a/contrib/restricted/aws/aws-c-common/source/bus.c b/contrib/restricted/aws/aws-c-common/source/bus.c deleted file mode 100644 index 68bb29deda1..00000000000 --- a/contrib/restricted/aws/aws-c-common/source/bus.c +++ /dev/null @@ -1,724 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -#include <aws/common/bus.h> - -#include <aws/common/allocator.h> -#include <aws/common/atomics.h> -#include <aws/common/byte_buf.h> -#include <aws/common/condition_variable.h> -#include <aws/common/hash_table.h> -#include <aws/common/linked_list.h> -#include <aws/common/logging.h> -#include <aws/common/mutex.h> -#include <aws/common/thread.h> - -#include <inttypes.h> - -#ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable : 4204) /* nonstandard extension used: non-constant aggregate initializer */ -#endif - -struct aws_bus { - struct aws_allocator *allocator; - - /* vtable and additional data structures for delivery policy */ - void *impl; -}; - -/* MUST be the first member of any impl to allow blind casting */ -struct bus_vtable { - void (*clean_up)(struct aws_bus *bus); - - int (*send)(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *)); - - int (*subscribe)(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *callback, void *user_data); - - void (*unsubscribe)(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *callback, void *user_data); -}; - -/* each bound callback is stored as a bus_listener in the slots table */ -struct bus_listener { - struct aws_linked_list_node list_node; - void *user_data; - aws_bus_listener_fn *deliver; -}; - -/* value type stored in each slot in the slots table in a bus */ -struct listener_list { - struct aws_allocator *allocator; - struct aws_linked_list listeners; -}; - -/* find a listener list (or NULL) by address */ -static struct listener_list *bus_find_listeners(struct aws_hash_table *slots, uint64_t address) { - struct aws_hash_element *elem = NULL; - if (aws_hash_table_find(slots, (void *)(uintptr_t)address, &elem)) { - return NULL; - } - - if (!elem) { - return NULL; - } - - struct listener_list *list = elem->value; - return list; -} - -/* find a listener list by address, or create/insert/return a new one */ -static struct listener_list *bus_find_or_create_listeners( - struct aws_allocator *allocator, - struct aws_hash_table *slots, - uint64_t address) { - struct listener_list *list = bus_find_listeners(slots, address); - if (list) { - return list; - } - - list = aws_mem_calloc(allocator, 1, sizeof(struct listener_list)); - list->allocator = allocator; - aws_linked_list_init(&list->listeners); - aws_hash_table_put(slots, (void *)(uintptr_t)address, list, NULL); - return list; -} - -static void s_bus_deliver_msg_to_slot( - struct aws_bus *bus, - uint64_t slot, - uint64_t address, - struct aws_hash_table *slots, - const void *payload) { - (void)bus; - struct listener_list *list = bus_find_listeners(slots, slot); - if (!list) { - return; - } - struct aws_linked_list_node *node = aws_linked_list_begin(&list->listeners); - for (; node != aws_linked_list_end(&list->listeners); node = aws_linked_list_next(node)) { - struct bus_listener *listener = AWS_CONTAINER_OF(node, struct bus_listener, list_node); - listener->deliver(address, payload, listener->user_data); - } -} - -/* common delivery logic */ -static void s_bus_deliver_msg( - struct aws_bus *bus, - uint64_t address, - struct aws_hash_table *slots, - const void *payload) { - s_bus_deliver_msg_to_slot(bus, AWS_BUS_ADDRESS_ALL, address, slots, payload); - s_bus_deliver_msg_to_slot(bus, address, address, slots, payload); -} - -/* common subscribe logic */ -static int s_bus_subscribe( - struct aws_bus *bus, - uint64_t address, - struct 
aws_hash_table *slots, - aws_bus_listener_fn *callback, - void *user_data) { - - if (address == AWS_BUS_ADDRESS_CLOSE) { - AWS_LOGF_ERROR(AWS_LS_COMMON_BUS, "Cannot directly subscribe to AWS_BUS_ADDRESS_CLOSE(0)"); - return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - } - - struct listener_list *list = bus_find_or_create_listeners(bus->allocator, slots, address); - struct bus_listener *listener = aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_listener)); - listener->deliver = callback; - listener->user_data = user_data; - aws_linked_list_push_back(&list->listeners, &listener->list_node); - - return AWS_OP_SUCCESS; -} - -/* common unsubscribe logic */ -static void s_bus_unsubscribe( - struct aws_bus *bus, - uint64_t address, - struct aws_hash_table *slots, - aws_bus_listener_fn *callback, - void *user_data) { - (void)bus; - - if (address == AWS_BUS_ADDRESS_CLOSE) { - AWS_LOGF_WARN(AWS_LS_COMMON_BUS, "Attempted to unsubscribe from invalid address AWS_BUS_ADDRESS_CLOSE") - return; - } - - struct listener_list *list = bus_find_listeners(slots, address); - if (!list) { - return; - } - - struct aws_linked_list_node *node; - for (node = aws_linked_list_begin(&list->listeners); node != aws_linked_list_end(&list->listeners); - node = aws_linked_list_next(node)) { - - struct bus_listener *listener = AWS_CONTAINER_OF(node, struct bus_listener, list_node); - if (listener->deliver == callback && listener->user_data == user_data) { - aws_linked_list_remove(node); - aws_mem_release(list->allocator, listener); - return; - } - } -} - -/* destructor for listener lists in the slots tables */ -void s_bus_destroy_listener_list(void *data) { - struct listener_list *list = data; - AWS_PRECONDITION(list->allocator); - /* call all listeners with an AWS_BUS_ADDRESS_CLOSE message type to clean up */ - while (!aws_linked_list_empty(&list->listeners)) { - struct aws_linked_list_node *back = aws_linked_list_back(&list->listeners); - struct bus_listener *listener = AWS_CONTAINER_OF(back, struct bus_listener, list_node); - listener->deliver(AWS_BUS_ADDRESS_CLOSE, NULL, listener->user_data); - aws_linked_list_pop_back(&list->listeners); - aws_mem_release(list->allocator, listener); - } - aws_mem_release(list->allocator, list); -} - -/* - * AWS_BUS_SYNC implementation - */ -struct bus_sync_impl { - struct bus_vtable vtable; - struct { - /* Map of address -> list of listeners */ - struct aws_hash_table table; - } slots; -}; - -static void s_bus_sync_clean_up(struct aws_bus *bus) { - struct bus_sync_impl *impl = bus->impl; - aws_hash_table_clean_up(&impl->slots.table); - aws_mem_release(bus->allocator, impl); -} - -static int s_bus_sync_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *)) { - struct bus_sync_impl *impl = bus->impl; - s_bus_deliver_msg(bus, address, &impl->slots.table, payload); - if (destructor) { - destructor(payload); - } - return AWS_OP_SUCCESS; -} - -static int s_bus_sync_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *callback, void *user_data) { - struct bus_sync_impl *impl = bus->impl; - return s_bus_subscribe(bus, address, &impl->slots.table, callback, user_data); -} - -static void s_bus_sync_unsubscribe( - struct aws_bus *bus, - uint64_t address, - aws_bus_listener_fn *callback, - void *user_data) { - struct bus_sync_impl *impl = bus->impl; - s_bus_unsubscribe(bus, address, &impl->slots.table, callback, user_data); -} - -static struct bus_vtable bus_sync_vtable = { - .clean_up = s_bus_sync_clean_up, - .send = s_bus_sync_send, - 
.subscribe = s_bus_sync_subscribe, - .unsubscribe = s_bus_sync_unsubscribe, -}; - -static void s_bus_sync_init(struct aws_bus *bus, const struct aws_bus_options *options) { - (void)options; - - struct bus_sync_impl *impl = bus->impl = aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_sync_impl)); - impl->vtable = bus_sync_vtable; - - if (aws_hash_table_init( - &impl->slots.table, bus->allocator, 8, aws_hash_ptr, aws_ptr_eq, NULL, s_bus_destroy_listener_list)) { - goto error; - } - - return; - -error: - aws_mem_release(bus->allocator, impl); -} - -/* - * AWS_BUS_ASYNC implementation - */ -struct bus_async_impl { - struct bus_vtable vtable; - struct { - /* Map of address -> list of listeners */ - struct aws_hash_table table; - } slots; - - /* Queue of bus_messages to deliver */ - struct { - struct aws_mutex mutex; - /* backing memory for the message free list */ - void *buffer; - void *buffer_end; /* 1 past the end of buffer */ - /* message free list */ - struct aws_linked_list free; /* struct bus_message */ - /* message delivery queue */ - struct aws_linked_list msgs; /* struct bus_message */ - /* list of pending adds/removes of listeners */ - struct aws_linked_list subs; /* struct pending_listener */ - } queue; - - /* dispatch thread */ - struct { - struct aws_thread thread; - struct aws_condition_variable notify; - bool running; - struct aws_atomic_var started; - struct aws_atomic_var exited; - } dispatch; - - bool reliable; -}; - -/* represents a message in the queue on impls that queue */ -struct bus_message { - struct aws_linked_list_node list_node; - uint64_t address; - void *payload; - - void (*destructor)(void *); -}; - -struct pending_listener { - struct aws_linked_list_node list_node; - uint64_t address; - aws_bus_listener_fn *listener; - void *user_data; - uint32_t add : 1; - uint32_t remove : 1; -}; - -static void s_bus_message_clean_up(struct bus_message *msg) { - if (msg->destructor) { - msg->destructor(msg->payload); - } - msg->destructor = NULL; - msg->payload = NULL; -} - -/* Assumes the caller holds the lock */ -static void s_bus_async_free_message(struct aws_bus *bus, struct bus_message *msg) { - struct bus_async_impl *impl = bus->impl; - s_bus_message_clean_up(msg); - if ((void *)msg >= impl->queue.buffer && (void *)msg < impl->queue.buffer_end) { - AWS_ZERO_STRUCT(*msg); - aws_linked_list_push_back(&impl->queue.free, &msg->list_node); - return; - } - aws_mem_release(bus->allocator, msg); -} - -/* Assumes the caller holds the lock */ -struct bus_message *s_bus_async_alloc_message(struct aws_bus *bus) { - struct bus_async_impl *impl = bus->impl; - - /* try the free list first */ - if (!aws_linked_list_empty(&impl->queue.free)) { - struct aws_linked_list_node *msg_node = aws_linked_list_pop_back(&impl->queue.free); - struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node); - return msg; - } - - /* unreliable will re-use the oldest message */ - if (!impl->reliable) { - struct aws_linked_list_node *msg_node = aws_linked_list_pop_front(&impl->queue.msgs); - struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node); - s_bus_async_free_message(bus, msg); - return s_bus_async_alloc_message(bus); - } - - return aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_message)); -} - -/* - * resolve all adds and removes of listeners, in FIFO order - * NOTE: expects mutex to be held by caller - */ -static void s_bus_apply_listeners(struct aws_bus *bus, struct aws_linked_list *pending_subs) { - struct bus_async_impl *impl = 
bus->impl; - while (!aws_linked_list_empty(pending_subs)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(pending_subs); - struct pending_listener *listener = AWS_CONTAINER_OF(node, struct pending_listener, list_node); - if (listener->add) { - s_bus_subscribe(bus, listener->address, &impl->slots.table, listener->listener, listener->user_data); - } else if (listener->remove) { - s_bus_unsubscribe(bus, listener->address, &impl->slots.table, listener->listener, listener->user_data); - } - aws_mem_release(bus->allocator, listener); - } -} - -static void s_bus_async_deliver_messages(struct aws_bus *bus, struct aws_linked_list *pending_msgs) { - struct bus_async_impl *impl = bus->impl; - struct aws_linked_list_node *msg_node = aws_linked_list_begin(pending_msgs); - for (; msg_node != aws_linked_list_end(pending_msgs); msg_node = aws_linked_list_next(msg_node)) { - struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node); - s_bus_deliver_msg(bus, msg->address, &impl->slots.table, msg->payload); - s_bus_message_clean_up(msg); - } - - /* push all pending messages back on the free list */ - aws_mutex_lock(&impl->queue.mutex); - { - while (!aws_linked_list_empty(pending_msgs)) { - msg_node = aws_linked_list_pop_front(pending_msgs); - struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node); - s_bus_async_free_message(bus, msg); - } - } - aws_mutex_unlock(&impl->queue.mutex); -} - -static void s_bus_async_clean_up(struct aws_bus *bus) { - struct bus_async_impl *impl = bus->impl; - - /* shut down delivery thread, clean up dispatch */ - AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus: %p clean_up: starting final drain", (void *)bus); - aws_mutex_lock(&impl->queue.mutex); - impl->dispatch.running = false; - aws_mutex_unlock(&impl->queue.mutex); - aws_condition_variable_notify_one(&impl->dispatch.notify); - /* Spin wait for the final drain and dispatch thread to complete */ - while (!aws_atomic_load_int(&impl->dispatch.exited)) { - aws_thread_current_sleep(1000 * 1000); /* 1 microsecond */ - } - AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus: %p clean_up: finished final drain", (void *)bus); - aws_thread_join(&impl->dispatch.thread); - aws_thread_clean_up(&impl->dispatch.thread); - aws_condition_variable_clean_up(&impl->dispatch.notify); - - /* should be impossible for subs or msgs to remain after final drain */ - AWS_FATAL_ASSERT(aws_linked_list_empty(&impl->queue.msgs)); - AWS_FATAL_ASSERT(aws_linked_list_empty(&impl->queue.subs)); - - /* this frees everything that the free/msgs lists point to */ - if (impl->queue.buffer) { - aws_mem_release(bus->allocator, impl->queue.buffer); - } - - aws_mutex_clean_up(&impl->queue.mutex); - - aws_hash_table_clean_up(&impl->slots.table); - aws_mem_release(bus->allocator, impl); -} - -static bool s_bus_async_should_wake_up(void *user_data) { - struct bus_async_impl *impl = user_data; - return !impl->dispatch.running || !aws_linked_list_empty(&impl->queue.subs) || - !aws_linked_list_empty(&impl->queue.msgs); -} - -static bool s_bus_async_is_running(struct bus_async_impl *impl) { - aws_mutex_lock(&impl->queue.mutex); - bool running = impl->dispatch.running; - aws_mutex_unlock(&impl->queue.mutex); - return running; -} - -/* Async bus delivery thread loop */ -static void s_bus_async_deliver(void *user_data) { - struct aws_bus *bus = user_data; - struct bus_async_impl *impl = bus->impl; - - aws_atomic_store_int(&impl->dispatch.started, 1); - AWS_LOGF_DEBUG(AWS_LS_COMMON_BUS, "bus %p: delivery thread loop started", (void 
*)bus); - - /* once shutdown has been triggered, need to drain one more time to ensure all queues are empty */ - int pending_drains = 1; - do { - struct aws_linked_list pending_msgs; - aws_linked_list_init(&pending_msgs); - - struct aws_linked_list pending_subs; - aws_linked_list_init(&pending_subs); - - aws_mutex_lock(&impl->queue.mutex); - { - aws_condition_variable_wait_pred( - &impl->dispatch.notify, &impl->queue.mutex, s_bus_async_should_wake_up, impl); - - /* copy out any queued subs/unsubs */ - aws_linked_list_swap_contents(&impl->queue.subs, &pending_subs); - /* copy out any queued messages */ - aws_linked_list_swap_contents(&impl->queue.msgs, &pending_msgs); - } - aws_mutex_unlock(&impl->queue.mutex); - - /* first resolve subs/unsubs */ - if (!aws_linked_list_empty(&pending_subs)) { - s_bus_apply_listeners(bus, &pending_subs); - } - - /* Then deliver queued messages */ - if (!aws_linked_list_empty(&pending_msgs)) { - s_bus_async_deliver_messages(bus, &pending_msgs); - } - } while (s_bus_async_is_running(impl) || pending_drains--); - - /* record that the dispatch thread is done */ - aws_atomic_store_int(&impl->dispatch.exited, 1); -} - -int s_bus_async_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *)) { - struct bus_async_impl *impl = bus->impl; - - aws_mutex_lock(&impl->queue.mutex); - { - if (!impl->dispatch.running) { - AWS_LOGF_WARN( - AWS_LS_COMMON_BUS, "bus %p: message sent after clean_up: address: %" PRIu64 "", (void *)bus, address); - aws_mutex_unlock(&impl->queue.mutex); - return aws_raise_error(AWS_ERROR_INVALID_STATE); - } - - struct bus_message *msg = s_bus_async_alloc_message(bus); - msg->address = address; - msg->payload = payload; - msg->destructor = destructor; - - /* push the message onto the delivery queue */ - aws_linked_list_push_back(&impl->queue.msgs, &msg->list_node); - } - aws_mutex_unlock(&impl->queue.mutex); - - /* notify the delivery thread to wake up */ - aws_condition_variable_notify_one(&impl->dispatch.notify); - - return AWS_OP_SUCCESS; -} - -int s_bus_async_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) { - struct bus_async_impl *impl = bus->impl; - - if (address == AWS_BUS_ADDRESS_CLOSE) { - AWS_LOGF_ERROR(AWS_LS_COMMON_BUS, "Cannot subscribe to AWS_BUS_ADDRESS_CLOSE"); - return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - } - - aws_mutex_lock(&impl->queue.mutex); - { - if (!impl->dispatch.running) { - AWS_LOGF_WARN( - AWS_LS_COMMON_BUS, - "bus %p: subscribe requested after clean_up: address: %" PRIu64 "", - (void *)bus, - address); - aws_mutex_unlock(&impl->queue.mutex); - return aws_raise_error(AWS_ERROR_INVALID_STATE); - } - - struct pending_listener *sub = aws_mem_calloc(bus->allocator, 1, sizeof(struct pending_listener)); - sub->address = address; - sub->listener = listener; - sub->user_data = user_data; - sub->add = true; - aws_linked_list_push_back(&impl->queue.subs, &sub->list_node); - } - aws_mutex_unlock(&impl->queue.mutex); - - /* notify the delivery thread to wake up */ - aws_condition_variable_notify_one(&impl->dispatch.notify); - return AWS_OP_SUCCESS; -} - -void s_bus_async_unsubscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) { - struct bus_async_impl *impl = bus->impl; - - if (address == AWS_BUS_ADDRESS_CLOSE) { - AWS_LOGF_ERROR(AWS_LS_COMMON_BUS, "Cannot unsubscribe from AWS_BUS_ADDRESS_CLOSE"); - return; - } - - aws_mutex_lock(&impl->queue.mutex); - { - if (!impl->dispatch.running) { - 
AWS_LOGF_WARN( - AWS_LS_COMMON_BUS, - "bus %p: unsubscribe requested after clean_up: address: %" PRIu64 "", - (void *)bus, - address); - aws_mutex_unlock(&impl->queue.mutex); - return; - } - - struct pending_listener *unsub = aws_mem_calloc(bus->allocator, 1, sizeof(struct pending_listener)); - unsub->address = address; - unsub->listener = listener; - unsub->user_data = user_data; - unsub->remove = true; - aws_linked_list_push_back(&impl->queue.subs, &unsub->list_node); - } - aws_mutex_unlock(&impl->queue.mutex); - - /* notify the delivery thread to wake up */ - aws_condition_variable_notify_one(&impl->dispatch.notify); -} - -static struct bus_vtable bus_async_vtable = { - .clean_up = s_bus_async_clean_up, - .send = s_bus_async_send, - .subscribe = s_bus_async_subscribe, - .unsubscribe = s_bus_async_unsubscribe, -}; - -static void s_bus_async_init(struct aws_bus *bus, const struct aws_bus_options *options) { - struct bus_async_impl *impl = bus->impl = aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_async_impl)); - impl->vtable = bus_async_vtable; - impl->reliable = (options->policy == AWS_BUS_ASYNC_RELIABLE); - - /* init msg queue */ - if (aws_mutex_init(&impl->queue.mutex)) { - AWS_LOGF_ERROR( - AWS_LS_COMMON_BUS, - "bus %p: Unable to initialize queue synchronization: %s", - (void *)bus, - aws_error_name(aws_last_error())); - goto error; - } - aws_linked_list_init(&impl->queue.msgs); - aws_linked_list_init(&impl->queue.free); - aws_linked_list_init(&impl->queue.subs); - - /* push as many bus_messages as we can into the free list from the buffer */ - if (options->buffer_size) { - impl->queue.buffer = aws_mem_calloc(bus->allocator, 1, options->buffer_size); - impl->queue.buffer_end = ((uint8_t *)impl->queue.buffer) + options->buffer_size; - const int msg_count = (int)(options->buffer_size / sizeof(struct bus_message)); - for (int msg_idx = 0; msg_idx < msg_count; ++msg_idx) { - struct bus_message *msg = (void *)&((char *)impl->queue.buffer)[msg_idx * sizeof(struct bus_message)]; - aws_linked_list_push_back(&impl->queue.free, &msg->list_node); - } - } - - /* init subscription table */ - if (aws_hash_table_init( - &impl->slots.table, bus->allocator, 8, aws_hash_ptr, aws_ptr_eq, NULL, s_bus_destroy_listener_list)) { - AWS_LOGF_ERROR( - AWS_LS_COMMON_BUS, - "bus %p: Unable to initialize bus addressing table: %s", - (void *)bus, - aws_error_name(aws_last_error())); - goto error; - } - - /* Setup dispatch thread */ - if (aws_condition_variable_init(&impl->dispatch.notify)) { - AWS_LOGF_ERROR( - AWS_LS_COMMON_BUS, - "bus %p: Unable to initialize async notify: %s", - (void *)bus, - aws_error_name(aws_last_error())); - goto error; - } - - if (aws_thread_init(&impl->dispatch.thread, bus->allocator)) { - AWS_LOGF_ERROR( - AWS_LS_COMMON_BUS, - "bus %p: Unable to initialize background thread: %s", - (void *)bus, - aws_error_name(aws_last_error())); - goto error; - } - - impl->dispatch.running = true; - aws_atomic_init_int(&impl->dispatch.started, 0); - aws_atomic_init_int(&impl->dispatch.exited, 0); - if (aws_thread_launch(&impl->dispatch.thread, s_bus_async_deliver, bus, aws_default_thread_options())) { - AWS_LOGF_ERROR( - AWS_LS_COMMON_BUS, - "bus %p: Unable to launch delivery thread: %s", - (void *)bus, - aws_error_name(aws_last_error())); - goto error; - } - - /* wait for dispatch thread to start before returning control */ - AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus %p: Waiting for delivery thread to start", (void *)bus); - while (!aws_atomic_load_int(&impl->dispatch.started)) { - 
aws_thread_current_sleep(1000 * 1000); - } - AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus %p: Delivery thread started", (void *)bus); - - return; - -error: - aws_thread_clean_up(&impl->dispatch.thread); - aws_condition_variable_clean_up(&impl->dispatch.notify); - aws_hash_table_clean_up(&impl->slots.table); - aws_mem_release(bus->allocator, &impl->queue.buffer); - aws_mutex_clean_up(&impl->queue.mutex); - aws_mem_release(bus->allocator, impl); - bus->impl = NULL; -} - -/* - * Public API - */ -struct aws_bus *aws_bus_new(struct aws_allocator *allocator, const struct aws_bus_options *options) { - struct aws_bus *bus = aws_mem_calloc(allocator, 1, sizeof(struct aws_bus)); - bus->allocator = allocator; - - switch (options->policy) { - case AWS_BUS_ASYNC_RELIABLE: - case AWS_BUS_ASYNC_UNRELIABLE: - s_bus_async_init(bus, options); - break; - case AWS_BUS_SYNC_RELIABLE: - s_bus_sync_init(bus, options); - break; - } - - if (!bus->impl) { - aws_mem_release(allocator, bus); - return NULL; - } - - return bus; -} - -void aws_bus_destroy(struct aws_bus *bus) { - struct bus_vtable *vtable = bus->impl; - vtable->clean_up(bus); - aws_mem_release(bus->allocator, bus); -} - -int aws_bus_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) { - struct bus_vtable *vtable = bus->impl; - return vtable->subscribe(bus, address, listener, user_data); -} - -void aws_bus_unsubscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) { - struct bus_vtable *vtable = bus->impl; - vtable->unsubscribe(bus, address, listener, user_data); -} - -int aws_bus_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *)) { - struct bus_vtable *vtable = bus->impl; - return vtable->send(bus, address, payload, destructor); -} - -#ifdef _MSC_VER -# pragma warning(pop) -#endif diff --git a/contrib/restricted/aws/aws-c-common/source/byte_buf.c b/contrib/restricted/aws/aws-c-common/source/byte_buf.c index f52aa16b455..b815b6bfb74 100644 --- a/contrib/restricted/aws/aws-c-common/source/byte_buf.c +++ b/contrib/restricted/aws/aws-c-common/source/byte_buf.c @@ -1631,7 +1631,7 @@ int aws_byte_buf_append_and_update(struct aws_byte_buf *to, struct aws_byte_curs return AWS_OP_ERR; } - from_and_update->ptr = to->buffer + (to->len - from_and_update->len); + from_and_update->ptr = to->buffer == NULL ? 
NULL : to->buffer + (to->len - from_and_update->len); return AWS_OP_SUCCESS; } diff --git a/contrib/restricted/aws/aws-c-common/source/command_line_parser.c b/contrib/restricted/aws/aws-c-common/source/command_line_parser.c index bf2db81e0a0..0699e7fbbdf 100644 --- a/contrib/restricted/aws/aws-c-common/source/command_line_parser.c +++ b/contrib/restricted/aws/aws-c-common/source/command_line_parser.c @@ -54,13 +54,11 @@ static const struct aws_cli_option *s_find_option_from_c_str( const struct aws_cli_option *option = &longopts[index]; while (option->name || option->val != 0) { - if (option->name) { - if (option->name && !strcmp(search_for, option->name)) { - if (longindex) { - *longindex = index; - } - return option; + if (option->name && !strcmp(search_for, option->name)) { + if (longindex) { + *longindex = index; } + return option; } option = &longopts[++index]; diff --git a/contrib/restricted/aws/aws-c-common/source/common.c b/contrib/restricted/aws/aws-c-common/source/common.c index a845e22acfc..062d23228d5 100644 --- a/contrib/restricted/aws/aws-c-common/source/common.c +++ b/contrib/restricted/aws/aws-c-common/source/common.c @@ -256,6 +256,12 @@ static struct aws_error_info errors[] = { AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_PLATFORM_NOT_SUPPORTED, "Feature not supported on this platform"), + AWS_DEFINE_ERROR_INFO_COMMON( + AWS_ERROR_INVALID_UTF8, + "Invalid UTF-8"), + AWS_DEFINE_ERROR_INFO_COMMON( + AWS_ERROR_GET_HOME_DIRECTORY_FAILED, + "Failed to get home directory"), }; /* clang-format on */ @@ -304,7 +310,7 @@ void aws_common_library_init(struct aws_allocator *allocator) { /* libnuma defines set_mempolicy() as a WEAK symbol. Loading into the global symbol table overwrites symbols and assumptions due to the way loaders and dlload are often implemented and those symbols are defined by things like libpthread.so on some unix distros. Sorry about the memory usage here, but it's our only safe choice. - Also, please don't do numa configurations if memory is your economic bottlneck. */ + Also, please don't do numa configurations if memory is your economic bottleneck. */ g_libnuma_handle = dlopen("libnuma.so", RTLD_LOCAL); /* turns out so versioning is really inconsistent these days */ diff --git a/contrib/restricted/aws/aws-c-common/source/date_time.c b/contrib/restricted/aws/aws-c-common/source/date_time.c index 77ec6ae0c17..cee4a90d88b 100644 --- a/contrib/restricted/aws/aws-c-common/source/date_time.c +++ b/contrib/restricted/aws/aws-c-common/source/date_time.c @@ -61,7 +61,7 @@ static void s_check_init_str_to_int(void) { } } -/* Get the 0-11 monthy number from a string representing Month. Case insensitive and will stop on abbreviation*/ +/* Get the 0-11 monthly number from a string representing Month. 
Case insensitive and will stop on abbreviation*/ static int get_month_number_from_str(const char *time_string, size_t start_index, size_t stop_index) { s_check_init_str_to_int(); diff --git a/contrib/restricted/aws/aws-c-common/source/encoding.c b/contrib/restricted/aws/aws-c-common/source/encoding.c index 038a7d74e9d..9ca5ca4fbaa 100644 --- a/contrib/restricted/aws/aws-c-common/source/encoding.c +++ b/contrib/restricted/aws/aws-c-common/source/encoding.c @@ -38,7 +38,7 @@ static inline bool aws_common_private_has_avx2(void) { static const uint8_t *HEX_CHARS = (const uint8_t *)"0123456789abcdef"; -static const uint8_t BASE64_SENTIANAL_VALUE = 0xff; +static const uint8_t BASE64_SENTINEL_VALUE = 0xff; static const uint8_t BASE64_ENCODING_TABLE[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; /* in this table, 0xDD is an invalid decoded value, if you have to do byte counting for any reason, there's 16 bytes @@ -337,10 +337,10 @@ int aws_base64_encode(const struct aws_byte_cursor *AWS_RESTRICT to_encode, stru return AWS_OP_SUCCESS; } -static inline int s_base64_get_decoded_value(unsigned char to_decode, uint8_t *value, int8_t allow_sentinal) { +static inline int s_base64_get_decoded_value(unsigned char to_decode, uint8_t *value, int8_t allow_sentinel) { uint8_t decode_value = BASE64_DECODING_TABLE[(size_t)to_decode]; - if (decode_value != 0xDD && (decode_value != BASE64_SENTIANAL_VALUE || allow_sentinal)) { + if (decode_value != 0xDD && (decode_value != BASE64_SENTINEL_VALUE || allow_sentinel)) { *value = decode_value; return AWS_OP_SUCCESS; } @@ -401,9 +401,9 @@ int aws_base64_decode(const struct aws_byte_cursor *AWS_RESTRICT to_decode, stru output->buffer[buffer_index++] = (uint8_t)((value1 << 2) | ((value2 >> 4) & 0x03)); - if (value3 != BASE64_SENTIANAL_VALUE) { + if (value3 != BASE64_SENTINEL_VALUE) { output->buffer[buffer_index++] = (uint8_t)(((value2 << 4) & 0xF0) | ((value3 >> 2) & 0x0F)); - if (value4 != BASE64_SENTIANAL_VALUE) { + if (value4 != BASE64_SENTINEL_VALUE) { output->buffer[buffer_index] = (uint8_t)((value3 & 0x03) << 6 | value4); } } @@ -412,7 +412,7 @@ int aws_base64_decode(const struct aws_byte_cursor *AWS_RESTRICT to_decode, stru return AWS_OP_SUCCESS; } -struct aws_utf8_validator { +struct aws_utf8_decoder { struct aws_allocator *alloc; /* Value of current codepoint, updated as we read each byte */ uint32_t codepoint; @@ -421,54 +421,66 @@ struct aws_utf8_validator { uint32_t min; /* Number of bytes remaining the current codepoint */ uint8_t remaining; + /* Custom callback */ + int (*on_codepoint)(uint32_t codepoint, void *user_data); + /* user_data for on_codepoint */ + void *user_data; }; -struct aws_utf8_validator *aws_utf8_validator_new(struct aws_allocator *allocator) { - struct aws_utf8_validator *validator = aws_mem_calloc(allocator, 1, sizeof(struct aws_utf8_validator)); - validator->alloc = allocator; - return validator; +struct aws_utf8_decoder *aws_utf8_decoder_new( + struct aws_allocator *allocator, + const struct aws_utf8_decoder_options *options) { + + struct aws_utf8_decoder *decoder = aws_mem_calloc(allocator, 1, sizeof(struct aws_utf8_decoder)); + decoder->alloc = allocator; + if (options) { + decoder->on_codepoint = options->on_codepoint; + decoder->user_data = options->user_data; + } + return decoder; } -void aws_utf8_validator_destroy(struct aws_utf8_validator *validator) { - if (validator) { - aws_mem_release(validator->alloc, validator); +void aws_utf8_decoder_destroy(struct aws_utf8_decoder *decoder) { + if (decoder) { + 
aws_mem_release(decoder->alloc, decoder); } } -void aws_utf8_validator_reset(struct aws_utf8_validator *validator) { - validator->codepoint = 0; - validator->min = 0; - validator->remaining = 0; +void aws_utf8_decoder_reset(struct aws_utf8_decoder *decoder) { + decoder->codepoint = 0; + decoder->min = 0; + decoder->remaining = 0; } /* Why yes, this could be optimized. */ -int aws_utf8_validator_update(struct aws_utf8_validator *validator, struct aws_byte_cursor bytes) { +int aws_utf8_decoder_update(struct aws_utf8_decoder *decoder, struct aws_byte_cursor bytes) { + /* We're respecting RFC-3629, which uses 1 to 4 byte sequences (never 5 or 6) */ for (size_t i = 0; i < bytes.len; ++i) { uint8_t byte = bytes.ptr[i]; - if (validator->remaining == 0) { + if (decoder->remaining == 0) { /* Check first byte of the codepoint to determine how many more bytes remain */ if ((byte & 0x80) == 0x00) { /* 1 byte codepoints start with 0xxxxxxx */ - validator->remaining = 0; - validator->codepoint = byte; - validator->min = 0; + decoder->remaining = 0; + decoder->codepoint = byte; + decoder->min = 0; } else if ((byte & 0xE0) == 0xC0) { /* 2 byte codepoints start with 110xxxxx */ - validator->remaining = 1; - validator->codepoint = byte & 0x1F; - validator->min = 0x80; + decoder->remaining = 1; + decoder->codepoint = byte & 0x1F; + decoder->min = 0x80; } else if ((byte & 0xF0) == 0xE0) { /* 3 byte codepoints start with 1110xxxx */ - validator->remaining = 2; - validator->codepoint = byte & 0x0F; - validator->min = 0x800; + decoder->remaining = 2; + decoder->codepoint = byte & 0x0F; + decoder->min = 0x800; } else if ((byte & 0xF8) == 0xF0) { /* 4 byte codepoints start with 11110xxx */ - validator->remaining = 3; - validator->codepoint = byte & 0x07; - validator->min = 0x10000; + decoder->remaining = 3; + decoder->codepoint = byte & 0x07; + decoder->min = 0x10000; } else { return aws_raise_error(AWS_ERROR_INVALID_UTF8); } @@ -481,45 +493,58 @@ int aws_utf8_validator_update(struct aws_utf8_validator *validator, struct aws_b /* Insert the 6 newly decoded bits: * shifting left anything we've already decoded, and insert the new bits to the right */ - validator->codepoint = (validator->codepoint << 6) | (byte & 0x3F); + decoder->codepoint = (decoder->codepoint << 6) | (byte & 0x3F); /* If we've decoded the whole codepoint, check it for validity * (don't need to do these particular checks on 1 byte codepoints) */ - if (--validator->remaining == 0) { + if (--decoder->remaining == 0) { /* Check that it's not "overlong" (encoded using more bytes than necessary) */ - if (validator->codepoint < validator->min) { + if (decoder->codepoint < decoder->min) { return aws_raise_error(AWS_ERROR_INVALID_UTF8); } /* UTF-8 prohibits encoding character numbers between U+D800 and U+DFFF, * which are reserved for use with the UTF-16 encoding form (as * surrogate pairs) and do not directly represent characters */ - if (validator->codepoint >= 0xD800 && validator->codepoint <= 0xDFFF) { + if (decoder->codepoint >= 0xD800 && decoder->codepoint <= 0xDFFF) { return aws_raise_error(AWS_ERROR_INVALID_UTF8); } } } + + /* Invoke user's on_codepoint callback */ + if (decoder->on_codepoint && decoder->remaining == 0) { + if (decoder->on_codepoint(decoder->codepoint, decoder->user_data)) { + return AWS_OP_ERR; + } + } } return AWS_OP_SUCCESS; } -int aws_utf8_validator_finalize(struct aws_utf8_validator *validator) { - bool valid = validator->remaining == 0; - aws_utf8_validator_reset(validator); +int aws_utf8_decoder_finalize(struct 
aws_utf8_decoder *decoder) { + bool valid = decoder->remaining == 0; + aws_utf8_decoder_reset(decoder); if (AWS_LIKELY(valid)) { return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_INVALID_UTF8); } -bool aws_text_is_valid_utf8(struct aws_byte_cursor bytes) { - struct aws_utf8_validator validator = {.remaining = 0}; - if (aws_utf8_validator_update(&validator, bytes)) { - return false; +int aws_decode_utf8(struct aws_byte_cursor bytes, const struct aws_utf8_decoder_options *options) { + struct aws_utf8_decoder decoder = { + .on_codepoint = options ? options->on_codepoint : NULL, + .user_data = options ? options->user_data : NULL, + }; + + if (aws_utf8_decoder_update(&decoder, bytes)) { + return AWS_OP_ERR; } - if (validator.remaining != 0) { - return false; + + if (aws_utf8_decoder_finalize(&decoder)) { + return AWS_OP_ERR; } - return true; + + return AWS_OP_SUCCESS; } diff --git a/contrib/restricted/aws/aws-c-common/source/error.c b/contrib/restricted/aws/aws-c-common/source/error.c index bdd4dfcd675..ad3cec86931 100644 --- a/contrib/restricted/aws/aws-c-common/source/error.c +++ b/contrib/restricted/aws/aws-c-common/source/error.c @@ -145,8 +145,20 @@ void aws_register_error_info(const struct aws_error_info_list *error_info) { } #if DEBUG_BUILD + /* Assert that first error has the right value */ + const int expected_first_code = slot_index << AWS_ERROR_ENUM_STRIDE_BITS; + if (error_info->error_list[0].error_code != expected_first_code) { + fprintf( + stderr, + "Missing info: First error in list should be %d, not %d (%s)\n", + expected_first_code, + error_info->error_list[0].error_code, + error_info->error_list[0].literal_name); + AWS_FATAL_ASSERT(0); + } + /* Assert that error info entries are in the right order. */ - for (int i = 1; i < error_info->count; ++i) { + for (int i = 0; i < error_info->count; ++i) { const int expected_code = min_range + i; const struct aws_error_info *info = &error_info->error_list[i]; if (info->error_code != expected_code) { @@ -193,13 +205,17 @@ int aws_translate_and_raise_io_error(int error_no) { case EISDIR: case ENAMETOOLONG: case ENOENT: + case ENOTDIR: return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); + case EMFILE: case ENFILE: return aws_raise_error(AWS_ERROR_MAX_FDS_EXCEEDED); case ENOMEM: return aws_raise_error(AWS_ERROR_OOM); case ENOSPC: return aws_raise_error(AWS_ERROR_NO_SPACE); + case ENOTEMPTY: + return aws_raise_error(AWS_ERROR_DIRECTORY_NOT_EMPTY); default: return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } diff --git a/contrib/restricted/aws/aws-c-common/source/external/cJSON.c b/contrib/restricted/aws/aws-c-common/source/external/cJSON.c index 8dd79bf1ecd..d6e49d9a894 100644 --- a/contrib/restricted/aws/aws-c-common/source/external/cJSON.c +++ b/contrib/restricted/aws/aws-c-common/source/external/cJSON.c @@ -89,12 +89,6 @@ typedef struct { const unsigned char *json; size_t position; } error; -static error global_error = { NULL, 0 }; - -CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void) -{ - return (const char*) (global_error.json + global_error.position); -} CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item) { @@ -1094,10 +1088,6 @@ CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } }; cJSON *item = NULL; - /* reset error position */ - global_error.json = NULL; - global_error.position = 0; - if (value == NULL || 0 == buffer_length) { goto fail; @@ -1162,7 +1152,6 @@ fail: *return_parse_end = (const char*)local_error.json + 
local_error.position; } - global_error = local_error; } return NULL; diff --git a/contrib/restricted/aws/aws-c-common/source/file.c b/contrib/restricted/aws/aws-c-common/source/file.c index 95a80b2ca53..5f490003a03 100644 --- a/contrib/restricted/aws/aws-c-common/source/file.c +++ b/contrib/restricted/aws/aws-c-common/source/file.c @@ -13,7 +13,14 @@ FILE *aws_fopen(const char *file_path, const char *mode) { if (!file_path || strlen(file_path) == 0) { - AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file %s", file_path); + AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file. path is empty"); + aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); + return NULL; + } + + if (!mode || strlen(mode) == 0) { + AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file. mode is empty"); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } @@ -34,9 +41,10 @@ int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocat if (fp) { if (fseek(fp, 0L, SEEK_END)) { - AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno); + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno_value); fclose(fp); - return aws_translate_and_raise_io_error(errno); + return aws_translate_and_raise_io_error(errno_value); } size_t allocation_size = (size_t)ftell(fp) + 1; @@ -52,16 +60,18 @@ int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocat out_buf->buffer[out_buf->len] = 0; if (fseek(fp, 0L, SEEK_SET)) { - AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno); + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno_value); aws_byte_buf_clean_up(out_buf); fclose(fp); - return aws_translate_and_raise_io_error(errno); + return aws_translate_and_raise_io_error(errno_value); } size_t read = fread(out_buf->buffer, 1, out_buf->len, fp); + int errno_cpy = errno; /* Always cache errno before potential side-effect */ fclose(fp); if (read < out_buf->len) { - AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to read file %s with errno %d", filename, errno); + AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to read file %s with errno %d", filename, errno_cpy); aws_secure_zero(out_buf->buffer, out_buf->len); aws_byte_buf_clean_up(out_buf); return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); @@ -70,12 +80,7 @@ int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocat return AWS_OP_SUCCESS; } - AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file %s with errno %d", filename, errno); - - if (errno == 0) { - return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); - } - return aws_translate_and_raise_io_error(errno); + return AWS_OP_ERR; } bool aws_is_any_directory_separator(char value) { diff --git a/contrib/restricted/aws/aws-c-common/source/json.c b/contrib/restricted/aws/aws-c-common/source/json.c index 387f41f1837..0131ea116b5 100644 --- a/contrib/restricted/aws/aws-c-common/source/json.c +++ b/contrib/restricted/aws/aws-c-common/source/json.c @@ -15,7 +15,7 @@ static struct aws_allocator *s_aws_json_module_allocator = NULL; static bool s_aws_json_module_initialized = false; struct aws_json_value *aws_json_value_new_string(struct aws_allocator *allocator, struct aws_byte_cursor string) { - struct aws_string 
*tmp = aws_string_new_from_cursor((struct aws_allocator *)allocator, &string); + struct aws_string *tmp = aws_string_new_from_cursor(allocator, &string); void *ret_val = cJSON_CreateString(aws_string_c_str(tmp)); aws_string_destroy_secure(tmp); return ret_val; @@ -47,7 +47,7 @@ struct aws_json_value *aws_json_value_new_object(struct aws_allocator *allocator } int aws_json_value_get_string(const struct aws_json_value *value, struct aws_byte_cursor *output) { - struct cJSON *cjson = (struct cJSON *)value; + const struct cJSON *cjson = (const struct cJSON *)value; if (!cJSON_IsString(cjson)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } @@ -56,7 +56,7 @@ int aws_json_value_get_string(const struct aws_json_value *value, struct aws_byt } int aws_json_value_get_number(const struct aws_json_value *value, double *output) { - struct cJSON *cjson = (struct cJSON *)value; + const struct cJSON *cjson = (const struct cJSON *)value; if (!cJSON_IsNumber(cjson)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } @@ -65,7 +65,7 @@ int aws_json_value_get_number(const struct aws_json_value *value, double *output } int aws_json_value_get_boolean(const struct aws_json_value *value, bool *output) { - struct cJSON *cjson = (struct cJSON *)value; + const struct cJSON *cjson = (const struct cJSON *)value; if (!cJSON_IsBool(cjson)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } @@ -109,7 +109,7 @@ struct aws_json_value *aws_json_value_get_from_object(const struct aws_json_valu void *return_value = NULL; struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key); - struct cJSON *cjson = (struct cJSON *)object; + const struct cJSON *cjson = (const struct cJSON *)object; if (!cJSON_IsObject(cjson)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto done; @@ -130,7 +130,7 @@ bool aws_json_value_has_key(const struct aws_json_value *object, struct aws_byte struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key); bool result = false; - struct cJSON *cjson = (struct cJSON *)object; + const struct cJSON *cjson = (const struct cJSON *)object; if (!cJSON_IsObject(cjson)) { goto done; } @@ -172,7 +172,7 @@ int aws_json_const_iterate_object( void *user_data) { int result = AWS_OP_ERR; - struct cJSON *cjson = (struct cJSON *)object; + const struct cJSON *cjson = (const struct cJSON *)object; if (!cJSON_IsObject(cjson)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto done; @@ -182,7 +182,7 @@ int aws_json_const_iterate_object( cJSON_ArrayForEach(key, cjson) { bool should_continue = true; struct aws_byte_cursor key_cur = aws_byte_cursor_from_c_str(key->string); - if (on_member(&key_cur, (struct aws_json_value *)key, &should_continue, user_data)) { + if (on_member(&key_cur, (const struct aws_json_value *)key, &should_continue, user_data)) { goto done; } @@ -214,8 +214,7 @@ int aws_json_value_add_array_element(struct aws_json_value *array, const struct } struct aws_json_value *aws_json_get_array_element(const struct aws_json_value *array, size_t index) { - - struct cJSON *cjson = (struct cJSON *)array; + const struct cJSON *cjson = (const struct cJSON *)array; if (!cJSON_IsArray(cjson)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; @@ -230,7 +229,7 @@ struct aws_json_value *aws_json_get_array_element(const struct aws_json_value *a } size_t aws_json_get_array_size(const struct aws_json_value *array) { - struct cJSON *cjson = (struct cJSON *)array; + const struct cJSON *cjson = (const struct cJSON *)array; if (!cJSON_IsArray(cjson)) { 
aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return 0; @@ -259,7 +258,7 @@ int aws_json_const_iterate_array( void *user_data) { int result = AWS_OP_ERR; - struct cJSON *cjson = (struct cJSON *)array; + const struct cJSON *cjson = (const struct cJSON *)array; if (!cJSON_IsArray(cjson)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto done; @@ -269,7 +268,7 @@ int aws_json_const_iterate_array( const cJSON *value = NULL; cJSON_ArrayForEach(value, cjson) { bool should_continue = true; - if (on_value(idx, (struct aws_json_value *)value, &should_continue, user_data)) { + if (on_value(idx, (const struct aws_json_value *)value, &should_continue, user_data)) { goto done; } @@ -286,13 +285,13 @@ done: } bool aws_json_value_compare(const struct aws_json_value *a, const struct aws_json_value *b, bool is_case_sensitive) { - struct cJSON *cjson_a = (struct cJSON *)a; - struct cJSON *cjson_b = (struct cJSON *)b; + const struct cJSON *cjson_a = (const struct cJSON *)a; + const struct cJSON *cjson_b = (const struct cJSON *)b; return cJSON_Compare(cjson_a, cjson_b, is_case_sensitive); } struct aws_json_value *aws_json_value_duplicate(const struct aws_json_value *value) { - struct cJSON *cjson = (struct cJSON *)value; + const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; @@ -307,7 +306,7 @@ struct aws_json_value *aws_json_value_duplicate(const struct aws_json_value *val } bool aws_json_value_is_string(const struct aws_json_value *value) { - struct cJSON *cjson = (struct cJSON *)value; + const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return false; } @@ -315,7 +314,7 @@ bool aws_json_value_is_string(const struct aws_json_value *value) { } bool aws_json_value_is_number(const struct aws_json_value *value) { - struct cJSON *cjson = (struct cJSON *)value; + const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return false; } @@ -323,7 +322,7 @@ bool aws_json_value_is_number(const struct aws_json_value *value) { } bool aws_json_value_is_array(const struct aws_json_value *value) { - struct cJSON *cjson = (struct cJSON *)value; + const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return false; } @@ -331,7 +330,7 @@ bool aws_json_value_is_array(const struct aws_json_value *value) { } bool aws_json_value_is_boolean(const struct aws_json_value *value) { - struct cJSON *cjson = (struct cJSON *)value; + const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return false; } @@ -339,7 +338,7 @@ bool aws_json_value_is_boolean(const struct aws_json_value *value) { } bool aws_json_value_is_null(const struct aws_json_value *value) { - struct cJSON *cjson = (struct cJSON *)value; + const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return false; } @@ -347,7 +346,7 @@ bool aws_json_value_is_null(const struct aws_json_value *value) { } bool aws_json_value_is_object(const struct aws_json_value *value) { - struct cJSON *cjson = (struct cJSON *)value; + const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return false; } @@ -391,7 +390,7 @@ void aws_json_value_destroy(struct aws_json_value *value) { } int aws_byte_buf_append_json_string(const struct aws_json_value *value, struct aws_byte_buf *output) { - struct cJSON *cjson = (struct cJSON *)value; + const struct cJSON *cjson = (const struct cJSON *)value; if 
(cJSON_IsInvalid(cjson)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } @@ -409,7 +408,7 @@ int aws_byte_buf_append_json_string(const struct aws_json_value *value, struct a } int aws_byte_buf_append_json_string_formatted(const struct aws_json_value *value, struct aws_byte_buf *output) { - struct cJSON *cjson = (struct cJSON *)value; + const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } @@ -427,7 +426,7 @@ int aws_byte_buf_append_json_string_formatted(const struct aws_json_value *value } struct aws_json_value *aws_json_value_new_from_string(struct aws_allocator *allocator, struct aws_byte_cursor string) { - struct aws_string *tmp = aws_string_new_from_cursor((struct aws_allocator *)allocator, &string); + struct aws_string *tmp = aws_string_new_from_cursor(allocator, &string); struct cJSON *cjson = cJSON_Parse(aws_string_c_str(tmp)); aws_string_destroy_secure(tmp); return (void *)cjson; diff --git a/contrib/restricted/aws/aws-c-common/source/log_formatter.c b/contrib/restricted/aws/aws-c-common/source/log_formatter.c index 513a7f87b45..d4be0c0c6d2 100644 --- a/contrib/restricted/aws/aws-c-common/source/log_formatter.c +++ b/contrib/restricted/aws/aws-c-common/source/log_formatter.c @@ -16,7 +16,7 @@ * Default formatter implementation */ -#if _MSC_VER +#ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif diff --git a/contrib/restricted/aws/aws-c-common/source/log_writer.c b/contrib/restricted/aws/aws-c-common/source/log_writer.c index 6eea2fc3c5e..5f5bc4f6fd5 100644 --- a/contrib/restricted/aws/aws-c-common/source/log_writer.c +++ b/contrib/restricted/aws/aws-c-common/source/log_writer.c @@ -27,7 +27,8 @@ static int s_aws_file_writer_write(struct aws_log_writer *writer, const struct a size_t length = output->len; if (fwrite(output->bytes, 1, length, impl->log_file) < length) { - return aws_translate_and_raise_io_error(errno); + int errno_value = errno; /* Always cache errno before potential side-effect */ + return aws_translate_and_raise_io_error(errno_value); } return AWS_OP_SUCCESS; @@ -76,7 +77,7 @@ static int s_aws_file_writer_init_internal( impl->log_file = aws_fopen(file_name_to_open, "a+"); if (impl->log_file == NULL) { aws_mem_release(allocator, impl); - return aws_translate_and_raise_io_error(errno); + return AWS_OP_ERR; } impl->close_file_on_cleanup = true; } else { diff --git a/contrib/restricted/aws/aws-c-common/source/logging.c b/contrib/restricted/aws/aws-c-common/source/logging.c index f9a6c5a2e65..fdc29576d82 100644 --- a/contrib/restricted/aws/aws-c-common/source/logging.c +++ b/contrib/restricted/aws/aws-c-common/source/logging.c @@ -15,7 +15,7 @@ #include <errno.h> #include <stdarg.h> -#if _MSC_VER +#ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif @@ -468,7 +468,7 @@ static int s_noalloc_stderr_logger_log( va_list format_args; va_start(format_args, format); -#if _MSC_VER +#ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4221) /* allow struct member to reference format_buffer */ #endif @@ -484,7 +484,7 @@ static int s_noalloc_stderr_logger_log( .amount_written = 0, }; -#if _MSC_VER +#ifdef _MSC_VER # pragma warning(pop) /* disallow struct member to reference local value */ #endif @@ -502,7 +502,8 @@ static int s_noalloc_stderr_logger_log( int write_result = AWS_OP_SUCCESS; if (fwrite(format_buffer, 1, format_data.amount_written, impl->file) < format_data.amount_written) { 
- aws_translate_and_raise_io_error(errno); + int errno_value = errno; /* Always cache errno before potential side-effect */ + aws_translate_and_raise_io_error(errno_value); write_result = AWS_OP_ERR; } @@ -561,6 +562,10 @@ int aws_logger_init_noalloc( } else { /* _MSC_VER */ if (options->filename != NULL) { impl->file = aws_fopen(options->filename, "w"); + if (!impl->file) { + aws_mem_release(allocator, impl); + return AWS_OP_ERR; + } impl->should_close = true; } else { impl->file = stderr; diff --git a/contrib/restricted/aws/aws-c-common/source/memtrace.c b/contrib/restricted/aws/aws-c-common/source/memtrace.c index 7362e07a30f..651fd93612a 100644 --- a/contrib/restricted/aws/aws-c-common/source/memtrace.c +++ b/contrib/restricted/aws/aws-c-common/source/memtrace.c @@ -5,19 +5,19 @@ #include <aws/common/atomics.h> #include <aws/common/byte_buf.h> +#include <aws/common/clock.h> #include <aws/common/hash_table.h> #include <aws/common/logging.h> #include <aws/common/mutex.h> #include <aws/common/priority_queue.h> #include <aws/common/string.h> #include <aws/common/system_info.h> -#include <aws/common/time.h> /* describes a single live allocation. * allocated by aws_default_allocator() */ struct alloc_info { size_t size; - time_t time; + uint64_t time; uint64_t stack; /* hash of stack frame pointers */ }; @@ -43,8 +43,8 @@ struct stack_trace { #endif /* Tracking structure, used as the allocator impl. - * This structure, and all its bookkeeping datastructures, are created with the aws_default_allocator(). - * This is not customizeable because it's too expensive for every little allocation to store + * This structure, and all its bookkeeping data structures, are created with the aws_default_allocator(). + * This is not customizable because it's too expensive for every little allocation to store * a pointer back to its original allocator. */ struct alloc_tracer { struct aws_allocator *traced_allocator; /* underlying allocator */ @@ -110,7 +110,7 @@ static void s_alloc_tracer_init( if (frames_per_stack > 128) { frames_per_stack = 128; } - tracer->frames_per_stack = (frames_per_stack) ? frames_per_stack : 8; + tracer->frames_per_stack = frames_per_stack ? 
frames_per_stack : 8; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_hash_table_init( @@ -128,7 +128,7 @@ static void s_alloc_tracer_track(struct alloc_tracer *tracer, void *ptr, size_t struct alloc_info *alloc = aws_mem_calloc(aws_default_allocator(), 1, sizeof(struct alloc_info)); AWS_FATAL_ASSERT(alloc); alloc->size = size; - alloc->time = time(NULL); + aws_high_res_clock_get_ticks(&alloc->time); if (tracer->level == AWS_MEMTRACE_STACKS) { /* capture stack frames, skip 2 for this function and the allocation vtable function */ @@ -300,14 +300,14 @@ void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) { size_t num_allocs = aws_hash_table_get_entry_count(&tracer->allocs); AWS_LOGF_TRACE( - AWS_LS_COMMON_MEMTRACE, "################################################################################\n"); + AWS_LS_COMMON_MEMTRACE, "################################################################################"); AWS_LOGF_TRACE( - AWS_LS_COMMON_MEMTRACE, "# BEGIN MEMTRACE DUMP #\n"); + AWS_LS_COMMON_MEMTRACE, "# BEGIN MEMTRACE DUMP #"); AWS_LOGF_TRACE( - AWS_LS_COMMON_MEMTRACE, "################################################################################\n"); + AWS_LS_COMMON_MEMTRACE, "################################################################################"); AWS_LOGF_TRACE( AWS_LS_COMMON_MEMTRACE, - "tracer: %zu bytes still allocated in %zu allocations\n", + "tracer: %zu bytes still allocated in %zu allocations", aws_atomic_load_int(&tracer->allocated), num_allocs); @@ -333,21 +333,24 @@ void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) { &allocs, aws_default_allocator(), num_allocs, sizeof(struct alloc_info *), s_alloc_compare)); aws_hash_table_foreach(&tracer->allocs, s_insert_allocs, &allocs); /* dump allocs by time */ - AWS_LOGF_TRACE( - AWS_LS_COMMON_MEMTRACE, "################################################################################\n"); - AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Leaks in order of allocation:\n"); - AWS_LOGF_TRACE( - AWS_LS_COMMON_MEMTRACE, "################################################################################\n"); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Leaks in order of allocation:"); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); while (aws_priority_queue_size(&allocs)) { struct alloc_info *alloc = NULL; aws_priority_queue_pop(&allocs, &alloc); - AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "ALLOC %zu bytes\n", alloc->size); if (alloc->stack) { struct aws_hash_element *item = NULL; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_hash_table_find(&stack_info, (void *)(uintptr_t)alloc->stack, &item)); struct stack_metadata *stack = item->value; - AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, " stacktrace:\n%s\n", (const char *)aws_string_bytes(stack->trace)); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, + "ALLOC %zu bytes, stacktrace:\n%s\n", + alloc->size, + aws_string_c_str(stack->trace)); + } else { + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "ALLOC %zu bytes", alloc->size); } } @@ -365,18 +368,18 @@ void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) { sizeof(struct stack_metadata *), s_stack_info_compare_size)); aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_size); - AWS_LOGF_TRACE( - AWS_LS_COMMON_MEMTRACE, - "################################################################################\n"); - AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by bytes leaked:\n"); - 
AWS_LOGF_TRACE( - AWS_LS_COMMON_MEMTRACE, - "################################################################################\n"); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by bytes leaked:"); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); while (aws_priority_queue_size(&stacks_by_size) > 0) { struct stack_metadata *stack = NULL; aws_priority_queue_pop(&stacks_by_size, &stack); - AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu bytes in %zu allocations:\n", stack->size, stack->count); - AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace)); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, + "%zu bytes in %zu allocations:\n%s\n", + stack->size, + stack->count, + aws_string_c_str(stack->trace)); } aws_priority_queue_clean_up(&stacks_by_size); @@ -389,30 +392,30 @@ void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) { num_stacks, sizeof(struct stack_metadata *), s_stack_info_compare_count)); - AWS_LOGF_TRACE( - AWS_LS_COMMON_MEMTRACE, - "################################################################################\n"); - AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by number of leaks:\n"); - AWS_LOGF_TRACE( - AWS_LS_COMMON_MEMTRACE, - "################################################################################\n"); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by number of leaks:"); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_count); while (aws_priority_queue_size(&stacks_by_count) > 0) { struct stack_metadata *stack = NULL; aws_priority_queue_pop(&stacks_by_count, &stack); - AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu allocations leaking %zu bytes:\n", stack->count, stack->size); - AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace)); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, + "%zu allocations leaking %zu bytes:\n%s\n", + stack->count, + stack->size, + aws_string_c_str(stack->trace)); } aws_priority_queue_clean_up(&stacks_by_count); aws_hash_table_clean_up(&stack_info); } AWS_LOGF_TRACE( - AWS_LS_COMMON_MEMTRACE, "################################################################################\n"); + AWS_LS_COMMON_MEMTRACE, "################################################################################"); AWS_LOGF_TRACE( - AWS_LS_COMMON_MEMTRACE, "# END MEMTRACE DUMP #\n"); + AWS_LS_COMMON_MEMTRACE, "# END MEMTRACE DUMP #"); AWS_LOGF_TRACE( - AWS_LS_COMMON_MEMTRACE, "################################################################################\n"); + AWS_LS_COMMON_MEMTRACE, "################################################################################"); aws_mutex_unlock(&tracer->mutex); } @@ -460,7 +463,7 @@ struct aws_allocator *aws_mem_tracer_new( enum aws_mem_trace_level level, size_t frames_per_stack) { - /* deprecated customizeable bookkeeping allocator */ + /* deprecated customizable bookkeeping allocator */ (void)deprecated; struct alloc_tracer *tracer = NULL; diff --git a/contrib/restricted/aws/aws-c-common/source/posix/file.c b/contrib/restricted/aws/aws-c-common/source/posix/file.c index 7c26ade8c36..e8b39c509c3 100644 --- a/contrib/restricted/aws/aws-c-common/source/posix/file.c +++ 
b/contrib/restricted/aws/aws-c-common/source/posix/file.c @@ -5,47 +5,39 @@ #include <aws/common/environment.h> #include <aws/common/file.h> +#include <aws/common/logging.h> #include <aws/common/string.h> #include <dirent.h> #include <errno.h> +#include <pwd.h> #include <stdio.h> #include <sys/stat.h> #include <unistd.h> FILE *aws_fopen_safe(const struct aws_string *file_path, const struct aws_string *mode) { - return fopen(aws_string_c_str(file_path), aws_string_c_str(mode)); -} - -static int s_parse_and_raise_error(int errno_cpy) { - if (errno_cpy == 0) { - return AWS_OP_SUCCESS; - } - - if (errno_cpy == ENOENT || errno_cpy == ENOTDIR) { - return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); - } - - if (errno_cpy == EMFILE || errno_cpy == ENFILE) { - return aws_raise_error(AWS_ERROR_MAX_FDS_EXCEEDED); - } - - if (errno_cpy == EACCES) { - return aws_raise_error(AWS_ERROR_NO_PERMISSION); + FILE *f = fopen(aws_string_c_str(file_path), aws_string_c_str(mode)); + if (!f) { + int errno_cpy = errno; /* Always cache errno before potential side-effect */ + aws_translate_and_raise_io_error(errno_cpy); + AWS_LOGF_ERROR( + AWS_LS_COMMON_IO, + "static: Failed to open file. path:'%s' mode:'%s' errno:%d aws-error:%d(%s)", + aws_string_c_str(file_path), + aws_string_c_str(mode), + errno_cpy, + aws_last_error(), + aws_error_name(aws_last_error())); } - - if (errno_cpy == ENOTEMPTY) { - return aws_raise_error(AWS_ERROR_DIRECTORY_NOT_EMPTY); - } - - return aws_raise_error(AWS_ERROR_UNKNOWN); + return f; } int aws_directory_create(const struct aws_string *dir_path) { int mkdir_ret = mkdir(aws_string_c_str(dir_path), S_IRWXU | S_IRWXG | S_IRWXO); + int errno_value = errno; /* Always cache errno before potential side-effect */ /** nobody cares if it already existed. */ - if (mkdir_ret != 0 && errno != EEXIST) { - return s_parse_and_raise_error(errno); + if (mkdir_ret != 0 && errno_value != EEXIST) { + return aws_translate_and_raise_io_error(errno_value); } return AWS_OP_SUCCESS; @@ -101,24 +93,27 @@ int aws_directory_delete(const struct aws_string *dir_path, bool recursive) { } int error_code = rmdir(aws_string_c_str(dir_path)); + int errno_value = errno; /* Always cache errno before potential side-effect */ - return error_code == 0 ? AWS_OP_SUCCESS : s_parse_and_raise_error(errno); + return error_code == 0 ? AWS_OP_SUCCESS : aws_translate_and_raise_io_error(errno_value); } int aws_directory_or_file_move(const struct aws_string *from, const struct aws_string *to) { int error_code = rename(aws_string_c_str(from), aws_string_c_str(to)); + int errno_value = errno; /* Always cache errno before potential side-effect */ - return error_code == 0 ? AWS_OP_SUCCESS : s_parse_and_raise_error(errno); + return error_code == 0 ? 
AWS_OP_SUCCESS : aws_translate_and_raise_io_error(errno_value); } int aws_file_delete(const struct aws_string *file_path) { int error_code = unlink(aws_string_c_str(file_path)); + int errno_value = errno; /* Always cache errno before potential side-effect */ - if (!error_code || errno == ENOENT) { + if (!error_code || errno_value == ENOENT) { return AWS_OP_SUCCESS; } - return s_parse_and_raise_error(errno); + return aws_translate_and_raise_io_error(errno_value); } int aws_directory_traverse( @@ -128,9 +123,10 @@ int aws_directory_traverse( aws_on_directory_entry *on_entry, void *user_data) { DIR *dir = opendir(aws_string_c_str(path)); + int errno_value = errno; /* Always cache errno before potential side-effect */ if (!dir) { - return s_parse_and_raise_error(errno); + return aws_translate_and_raise_io_error(errno_value); } struct aws_byte_cursor current_path = aws_byte_cursor_from_string(path); @@ -227,13 +223,39 @@ AWS_STATIC_STRING_FROM_LITERAL(s_home_env_var, "HOME"); struct aws_string *aws_get_home_directory(struct aws_allocator *allocator) { - /* ToDo: check getpwuid_r if environment check fails */ - struct aws_string *home_env_var_value = NULL; - if (aws_get_environment_value(allocator, s_home_env_var, &home_env_var_value) == 0 && home_env_var_value != NULL) { - return home_env_var_value; + /* First, check "HOME" environment variable. + * If it's set, then return it, even if it's an empty string. */ + struct aws_string *home_value = NULL; + aws_get_environment_value(allocator, s_home_env_var, &home_value); + if (home_value != NULL) { + return home_value; + } + + /* Next, check getpwuid_r(). + * We need to allocate a tmp buffer to store the result strings, + * and the max possible size for this thing can be pretty big, + * so start with a reasonable allocation, and if that's not enough try something bigger. 
*/ + uid_t uid = getuid(); /* cannot fail */ + struct passwd pwd; + struct passwd *result = NULL; + char *buf = NULL; + int status = ERANGE; + for (size_t bufsize = 1024; bufsize <= 16384 && status == ERANGE; bufsize *= 2) { + if (buf) { + aws_mem_release(allocator, buf); + } + buf = aws_mem_acquire(allocator, bufsize); + status = getpwuid_r(uid, &pwd, buf, bufsize, &result); + } + + if (status == 0 && result != NULL && result->pw_dir != NULL) { + home_value = aws_string_new_from_c_str(allocator, result->pw_dir); + } else { + aws_raise_error(AWS_ERROR_GET_HOME_DIRECTORY_FAILED); } - return NULL; + aws_mem_release(allocator, buf); + return home_value; } bool aws_path_exists(const struct aws_string *path) { @@ -251,10 +273,11 @@ int aws_fseek(FILE *file, int64_t offset, int whence) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } int result = fseek(file, offset, whence); -#endif /* AWS_HAVE_POSIX_LFS */ +#endif /* AWS_HAVE_POSIX_LFS */ + int errno_value = errno; /* Always cache errno before potential side-effect */ if (result != 0) { - return aws_translate_and_raise_io_error(errno); + return aws_translate_and_raise_io_error(errno_value); } return AWS_OP_SUCCESS; @@ -270,7 +293,8 @@ int aws_file_get_length(FILE *file, int64_t *length) { } if (fstat(fd, &file_stats)) { - return aws_translate_and_raise_io_error(errno); + int errno_value = errno; /* Always cache errno before potential side-effect */ + return aws_translate_and_raise_io_error(errno_value); } *length = file_stats.st_size; diff --git a/contrib/restricted/aws/aws-c-common/source/posix/thread.c b/contrib/restricted/aws/aws-c-common/source/posix/thread.c index d17d859c3b1..57d48aa9c70 100644 --- a/contrib/restricted/aws/aws-c-common/source/posix/thread.c +++ b/contrib/restricted/aws/aws-c-common/source/posix/thread.c @@ -23,9 +23,11 @@ #include <time.h> #include <unistd.h> -#if defined(__FreeBSD__) || defined(__NETBSD__) +#if defined(__FreeBSD__) || defined(__NetBSD__) # include <pthread_np.h> typedef cpuset_t cpu_set_t; +#elif defined(__OpenBSD__) +# include <pthread_np.h> #endif #if !defined(AWS_AFFINITY_METHOD) @@ -128,6 +130,8 @@ static void s_set_thread_name(pthread_t thread_id, const char *name) { pthread_setname_np(name); #elif defined(AWS_PTHREAD_SETNAME_TAKES_2ARGS) pthread_setname_np(thread_id, name); +#elif defined(AWS_PTHREAD_SET_NAME_TAKES_2ARGS) + pthread_set_name_np(thread_id, name); #elif defined(AWS_PTHREAD_SETNAME_TAKES_3ARGS) pthread_setname_np(thread_id, name, NULL); #else @@ -165,8 +169,9 @@ static void *thread_fn(void *arg) { * and makes sure the numa node of the cpu we launched this thread on is where memory gets allocated. However, * we don't want to fail the application if this fails, so make the call, and ignore the result. */ long resp = g_set_mempolicy_ptr(AWS_MPOL_PREFERRED_ALIAS, NULL, 0); + int errno_value = errno; /* Always cache errno before potential side-effect */ if (resp) { - AWS_LOGF_WARN(AWS_LS_COMMON_THREAD, "call to set_mempolicy() failed with errno %d", errno); + AWS_LOGF_WARN(AWS_LS_COMMON_THREAD, "call to set_mempolicy() failed with errno %d", errno_value); } } wrapper.func(wrapper.arg); @@ -274,7 +279,7 @@ int aws_thread_launch( /* AFAIK you can't set thread affinity on apple platforms, and it doesn't really matter since all memory * NUMA or not is setup in interleave mode. 
- * Thread afinity is also not supported on Android systems, and honestly, if you're running android on a NUMA + * Thread affinity is also not supported on Android systems, and honestly, if you're running android on a NUMA * configuration, you've got bigger problems. */ #if AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD_ATTR if (options->cpu_id >= 0) { @@ -460,3 +465,32 @@ int aws_thread_current_at_exit(aws_thread_atexit_fn *callback, void *user_data) tl_wrapper->atexit = cb; return AWS_OP_SUCCESS; } + +int aws_thread_current_name(struct aws_allocator *allocator, struct aws_string **out_name) { + return aws_thread_name(allocator, aws_thread_current_thread_id(), out_name); +} + +#define THREAD_NAME_BUFFER_SIZE 256 +int aws_thread_name(struct aws_allocator *allocator, aws_thread_id_t thread_id, struct aws_string **out_name) { + *out_name = NULL; +#if defined(AWS_PTHREAD_GETNAME_TAKES_2ARGS) || defined(AWS_PTHREAD_GETNAME_TAKES_3ARGS) || \ + defined(AWS_PTHREAD_GET_NAME_TAKES_2_ARGS) + char name[THREAD_NAME_BUFFER_SIZE] = {0}; +# ifdef AWS_PTHREAD_GETNAME_TAKES_3ARGS + if (pthread_getname_np(thread_id, name, THREAD_NAME_BUFFER_SIZE)) { +# elif AWS_PTHREAD_GETNAME_TAKES_2ARGS + if (pthread_getname_np(thread_id, name)) { +# elif AWS_PTHREAD_GET_NAME_TAKES_2ARGS + if (pthread_get_name_np(thread_id, name)) { +# endif + + return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); + } + + *out_name = aws_string_new_from_c_str(allocator, name); + return AWS_OP_SUCCESS; +#else + + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); +#endif +} diff --git a/contrib/restricted/aws/aws-c-common/source/priority_queue.c b/contrib/restricted/aws/aws-c-common/source/priority_queue.c index f7d0f54e2d6..86a91feb3ae 100644 --- a/contrib/restricted/aws/aws-c-common/source/priority_queue.c +++ b/contrib/restricted/aws/aws-c-common/source/priority_queue.c @@ -59,7 +59,8 @@ static bool s_sift_down(struct aws_priority_queue *queue, size_t root) { size_t left = LEFT_OF(root); size_t right = RIGHT_OF(root); size_t first = root; - void *first_item = NULL, *other_item = NULL; + void *first_item = NULL; + void *other_item = NULL; aws_array_list_get_at_ptr(&queue->container, &first_item, root); aws_array_list_get_at_ptr(&queue->container, &other_item, left); @@ -100,7 +101,8 @@ static bool s_sift_up(struct aws_priority_queue *queue, size_t index) { bool did_move = false; - void *parent_item = NULL, *child_item = NULL; + void *parent_item = NULL; + void *child_item = NULL; size_t parent = PARENT_OF(index); while (index) { /* @@ -216,8 +218,8 @@ bool aws_priority_queue_backpointers_valid(const struct aws_priority_queue *cons /* Internal container validity */ bool backpointer_list_is_valid = - ((aws_array_list_is_valid(&queue->backpointers) && (queue->backpointers.current_size != 0) && - (queue->backpointers.data != NULL))); + (aws_array_list_is_valid(&queue->backpointers) && (queue->backpointers.current_size != 0) && + (queue->backpointers.data != NULL)); /* Backpointer struct should either be zero or should be * initialized to be at most as long as the container, and having diff --git a/contrib/restricted/aws/aws-c-common/source/uri.c b/contrib/restricted/aws/aws-c-common/source/uri.c index 05596414200..1fafc9492e6 100644 --- a/contrib/restricted/aws/aws-c-common/source/uri.c +++ b/contrib/restricted/aws/aws-c-common/source/uri.c @@ -11,7 +11,7 @@ #include <stdio.h> #include <string.h> -#if _MSC_VER +#ifdef _MSC_VER # pragma warning(disable : 4221) /* aggregate initializer using local variable addresses */ # pragma 
warning(disable : 4204) /* non-constant aggregate initializer */ #endif @@ -264,7 +264,7 @@ int aws_uri_query_string_params(const struct aws_uri *uri, struct aws_array_list } static void s_parse_scheme(struct uri_parser *parser, struct aws_byte_cursor *str) { - uint8_t *location_of_colon = memchr(str->ptr, ':', str->len); + const uint8_t *location_of_colon = memchr(str->ptr, ':', str->len); if (!location_of_colon) { parser->state = ON_AUTHORITY; @@ -292,8 +292,8 @@ static void s_parse_scheme(struct uri_parser *parser, struct aws_byte_cursor *st } static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor *str) { - uint8_t *location_of_slash = memchr(str->ptr, '/', str->len); - uint8_t *location_of_qmark = memchr(str->ptr, '?', str->len); + const uint8_t *location_of_slash = memchr(str->ptr, '/', str->len); + const uint8_t *location_of_qmark = memchr(str->ptr, '?', str->len); if (!location_of_slash && !location_of_qmark && str->len) { parser->uri->authority.ptr = str->ptr; @@ -309,7 +309,7 @@ static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); return; } else { - uint8_t *end = str->ptr + str->len; + const uint8_t *end = str->ptr + str->len; if (location_of_slash) { parser->state = ON_PATH; end = location_of_slash; @@ -325,7 +325,7 @@ static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor if (authority_parse_csr.len) { /* RFC-3986 section 3.2: authority = [ userinfo "@" ] host [ ":" port ] */ - uint8_t *userinfo_delim = memchr(authority_parse_csr.ptr, '@', authority_parse_csr.len); + const uint8_t *userinfo_delim = memchr(authority_parse_csr.ptr, '@', authority_parse_csr.len); if (userinfo_delim) { parser->uri->userinfo = @@ -351,7 +351,7 @@ static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor /* RFC-3986 section 3.2: host identified by IPv6 literal address is * enclosed within square brackets. 
We must ignore any colons within * IPv6 literals and only search for port delimiter after closing bracket.*/ - uint8_t *port_search_start = authority_parse_csr.ptr; + const uint8_t *port_search_start = authority_parse_csr.ptr; size_t port_search_len = authority_parse_csr.len; if (authority_parse_csr.len > 0 && authority_parse_csr.ptr[0] == '[') { port_search_start = memchr(authority_parse_csr.ptr, ']', authority_parse_csr.len); @@ -363,7 +363,7 @@ static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor port_search_len = authority_parse_csr.len - (port_search_start - authority_parse_csr.ptr); } - uint8_t *port_delim = memchr(port_search_start, ':', port_search_len); + const uint8_t *port_delim = memchr(port_search_start, ':', port_search_len); if (!port_delim) { parser->uri->port = 0; @@ -407,7 +407,7 @@ static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor static void s_parse_path(struct uri_parser *parser, struct aws_byte_cursor *str) { parser->uri->path_and_query = *str; - uint8_t *location_of_q_mark = memchr(str->ptr, '?', str->len); + const uint8_t *location_of_q_mark = memchr(str->ptr, '?', str->len); if (!location_of_q_mark) { parser->uri->path.ptr = str->ptr; @@ -540,8 +540,8 @@ static int s_encode_cursor_to_buffer( struct aws_byte_buf *buffer, const struct aws_byte_cursor *cursor, unchecked_append_canonicalized_character_fn *append_canonicalized_character) { - uint8_t *current_ptr = cursor->ptr; - uint8_t *end_ptr = cursor->ptr + cursor->len; + const uint8_t *current_ptr = cursor->ptr; + const uint8_t *end_ptr = cursor->ptr + cursor->len; /* * reserve room up front for the worst possible case: everything gets % encoded diff --git a/contrib/restricted/aws/aws-c-common/source/xml_parser.c b/contrib/restricted/aws/aws-c-common/source/xml_parser.c index 692324ac9a2..ac238cdfaf3 100644 --- a/contrib/restricted/aws/aws-c-common/source/xml_parser.c +++ b/contrib/restricted/aws/aws-c-common/source/xml_parser.c @@ -151,13 +151,13 @@ int aws_xml_parser_parse( /* burn everything that precedes the actual xml nodes. */ while (parser->doc.len) { - uint8_t *start = memchr(parser->doc.ptr, '<', parser->doc.len); + const uint8_t *start = memchr(parser->doc.ptr, '<', parser->doc.len); if (!start) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); } - uint8_t *location = memchr(parser->doc.ptr, '>', parser->doc.len); + const uint8_t *location = memchr(parser->doc.ptr, '>', parser->doc.len); if (!location) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); @@ -312,14 +312,14 @@ int aws_xml_node_traverse( /* look for the next node at the current level. do this until we encounter the parent node's * closing tag. 
*/ while (!parser->stop_parsing && !parser->error) { - uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len); + const uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len); if (!next_location) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); } - uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len); + const uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len); if (!end_location) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); @@ -409,14 +409,14 @@ int aws_xml_node_get_attribute( int s_node_next_sibling(struct aws_xml_parser *parser) { AWS_PRECONDITION(parser); - uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len); + const uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len); if (!next_location) { return parser->error; } aws_byte_cursor_advance(&parser->doc, next_location - parser->doc.ptr); - uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len); + const uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len); if (!end_location) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
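
Several hunks above (log_writer.c, logging.c, posix/file.c) replace direct reads of errno with a locally cached copy before calling aws_translate_and_raise_io_error(). A minimal sketch of that pattern, assuming a hypothetical translate_io_error() helper in place of the real translation function:

```c
#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for aws_translate_and_raise_io_error(): maps an errno
 * value onto a library error code. Anything it calls (logging, allocation)
 * may itself clobber errno, which is why callers cache the value first. */
static int translate_io_error(int errno_value) {
    fprintf(stderr, "I/O error, errno=%d\n", errno_value);
    return -1;
}

/* Open a log file, translating failures the way the patched writers do. */
int open_log_file(const char *path, FILE **out_file) {
    FILE *f = fopen(path, "a+");
    if (f == NULL) {
        int errno_value = errno; /* always cache errno before potential side-effect */
        return translate_io_error(errno_value);
    }
    *out_file = f;
    return 0;
}
```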
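memtrace.c now stamps each alloc_info with aws_high_res_clock_get_ticks() instead of time(NULL), so the leak dump can order allocations at sub-second resolution. A sketch of the same idea, with clock_gettime(CLOCK_MONOTONIC) standing in for the aws clock API (an assumption for illustration, not the library's implementation):

```c
#define _POSIX_C_SOURCE 199309L /* clock_gettime */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for aws_high_res_clock_get_ticks(): monotonic nanoseconds,
 * good for ordering events, not meant for wall-clock display. */
static uint64_t monotonic_ticks(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

struct alloc_info {
    size_t size;
    uint64_t time; /* was time_t at 1-second granularity */
};

int main(void) {
    struct alloc_info a = {.size = 64, .time = monotonic_ticks()};
    struct alloc_info b = {.size = 128, .time = monotonic_ticks()};
    /* back-to-back allocations get nanosecond-resolution, monotonic stamps */
    printf("a.time <= b.time: %s\n", a.time <= b.time ? "yes" : "no");
    return 0;
}
```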
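aws_get_home_directory() in posix/file.c now falls back to getpwuid_r() when HOME is unset, growing a scratch buffer from 1024 up to 16384 bytes while the call reports ERANGE. A standalone sketch of that technique using plain malloc/realloc instead of the aws allocator; the function and buffer sizes below only mirror the hunk, they are not library API:

```c
#define _POSIX_C_SOURCE 200809L /* getpwuid_r, strdup */
#include <errno.h>
#include <pwd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/* Returns a heap-allocated copy of the current user's home directory,
 * or NULL on failure. Caller frees the result. */
char *get_home_directory(void) {
    const char *env_home = getenv("HOME");
    if (env_home != NULL) {
        return strdup(env_home); /* HOME wins, even if empty */
    }

    uid_t uid = getuid(); /* cannot fail */
    struct passwd pwd;
    struct passwd *result = NULL;
    char *buf = NULL;
    char *home = NULL;
    int status = ERANGE;

    /* getpwuid_r() reports ERANGE when the scratch buffer is too small,
     * so grow it geometrically up to a reasonable cap. */
    for (size_t bufsize = 1024; bufsize <= 16384 && status == ERANGE; bufsize *= 2) {
        char *tmp = realloc(buf, bufsize);
        if (tmp == NULL) {
            break;
        }
        buf = tmp;
        status = getpwuid_r(uid, &pwd, buf, bufsize, &result);
    }

    if (status == 0 && result != NULL && result->pw_dir != NULL) {
        home = strdup(result->pw_dir);
    }
    free(buf);
    return home;
}

int main(void) {
    char *home = get_home_directory();
    printf("home: %s\n", home ? home : "(unknown)");
    free(home);
    return 0;
}
```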
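posix/thread.c adds aws_thread_name() and aws_thread_current_name(), dispatching across pthread_getname_np()/pthread_get_name_np() variants because the signature differs per platform. A minimal sketch of the glibc flavor only (2-argument set, 3-argument get); other platforms need the other branches shown in the hunk:

```c
/* build: cc -pthread thread_name.c */
#define _GNU_SOURCE /* pthread_{set,get}name_np on glibc */
#include <pthread.h>
#include <stdio.h>

#define THREAD_NAME_BUFFER_SIZE 16 /* Linux caps thread names at 15 chars + NUL */

int main(void) {
    /* glibc: pthread_setname_np(pthread_t, const char *) takes 2 arguments */
    if (pthread_setname_np(pthread_self(), "aws-worker") != 0) {
        return 1;
    }

    /* glibc: pthread_getname_np(pthread_t, char *, size_t) takes 3 arguments */
    char name[THREAD_NAME_BUFFER_SIZE] = {0};
    if (pthread_getname_np(pthread_self(), name, sizeof(name)) != 0) {
        return 1;
    }
    printf("current thread name: '%s'\n", name);
    return 0;
}
```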
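uri.c and xml_parser.c now store memchr() results in const-qualified pointers; the parsers only read through them, so const documents intent and lets the compiler reject accidental writes. A tiny illustration of the same const hygiene on a hypothetical scheme_length() helper (not part of aws-c-common):

```c
#include <stddef.h>
#include <string.h>

/* Length of the scheme portion of a URI-like string
 * ("https://example.com" -> 5), or 0 if no ':' is present.
 * The input is only read, so the pointer into it stays const-qualified. */
size_t scheme_length(const char *uri, size_t len) {
    const char *colon = memchr(uri, ':', len); /* result is never written through */
    return colon ? (size_t)(colon - uri) : 0;
}
```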