author    | robot-contrib <[email protected]> | 2022-09-16 21:59:36 +0300
committer | robot-contrib <[email protected]> | 2022-09-16 21:59:36 +0300
commit    | acd29eedcf99f811d7a2229a66bacdcce90a34c8 (patch)
tree      | d9ffe7a884777b905b87e3053648758beb623497 /contrib/restricted/boost/container/src
parent    | 1474d6ec81020fbf45ab0ac679448deb57e9678f (diff)
Update boost/container, boost/move, boost/intrusive to 1.80.0
Diffstat (limited to 'contrib/restricted/boost/container/src')
7 files changed, 322 insertions, 253 deletions
diff --git a/contrib/restricted/boost/container/src/dlmalloc_2_8_6.c b/contrib/restricted/boost/container/src/dlmalloc_2_8_6.c
index bb105b63fb9..3424f59b17d 100644
--- a/contrib/restricted/boost/container/src/dlmalloc_2_8_6.c
+++ b/contrib/restricted/boost/container/src/dlmalloc_2_8_6.c
@@ -1515,7 +1515,7 @@ LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
 #pragma intrinsic (_InterlockedExchange)
 #define interlockedcompareexchange _InterlockedCompareExchange
 #define interlockedexchange _InterlockedExchange
-#elif defined(WIN32) && defined(__GNUC__)
+#elif defined(WIN32) && (defined(__GNUC__) || defined(__clang__))
 #define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
 #define interlockedexchange __sync_lock_test_and_set
 #endif /* Win32 */
@@ -1762,7 +1762,7 @@ static FORCEINLINE int win32munmap(void* ptr, size_t size) {
 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
 #endif /* HAVE_MMAP && HAVE_MREMAP */
 
-/* mstate bit set if continguous morecore disabled or failed */
+/* mstate bit set if contiguous morecore disabled or failed */
 #define USE_NONCONTIGUOUS_BIT (4U)
 
 /* segment bit set in create_mspace_with_base */
@@ -2726,7 +2726,7 @@ static int has_segment_link(mstate m, msegmentptr ss) {
   noncontiguous segments are added.
 */
 #define TOP_FOOT_SIZE\
-  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
+  (align_offset(TWO_SIZE_T_SIZES)+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
 
 /* ------------------------------- Hooks -------------------------------- */
 
@@ -4676,7 +4676,7 @@ void* dlmalloc(size_t bytes) {
 
 void dlfree(void* mem) {
   /*
-     Consolidate freed chunks with preceeding or succeeding bordering
+     Consolidate freed chunks with preceding or succeeding bordering
      free chunks, if they exist, and then place in a bin. Intermixed
      with special cases for top, dv, mmapped chunks, and usage errors.
   */
@@ -6210,10 +6210,10 @@ History:
       Wolfram Gloger ([email protected]).
     * Use last_remainder in more cases.
     * Pack bins using idea from [email protected]
-    * Use ordered bins instead of best-fit threshhold
+    * Use ordered bins instead of best-fit threshold
     * Eliminate block-local decls to simplify tracing and debugging.
     * Support another case of realloc via move into top
-    * Fix error occuring when initial sbrk_base not word-aligned.
+    * Fix error occurring when initial sbrk_base not word-aligned.
     * Rely on page size for units instead of SBRK_UNIT to avoid
      surprises about sbrk alignment conventions.
    * Add mallinfo, mallopt. Thanks to Raymond Nijssen
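One detail worth calling out in the first hunk: InterlockedCompareExchange(dest, exchange, comparand) and the GCC/Clang builtin __sync_val_compare_and_swap(ptr, oldval, newval) take the new and expected values in opposite order, which is why the macro maps (a, b, c) to (a, c, b). A minimal standalone sketch of that mapping (my illustration, not part of the patch; compiles with GCC or Clang):

// Illustrates the argument swap behind the interlockedcompareexchange
// macro in the hunk above. __sync_val_compare_and_swap returns the
// previous value of *target, just like InterlockedCompareExchange.
#include <cassert>

#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)

int main() {
    long target = 5;
    // Win32 order: (destination, exchange=7, comparand=5).
    // The macro reorders this to __sync_val_compare_and_swap(&target, 5, 7).
    long prev = interlockedcompareexchange(&target, 7, 5);
    assert(prev == 5 && target == 7);
    return 0;
}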
diff --git a/contrib/restricted/boost/container/src/dlmalloc_ext_2_8_6.c b/contrib/restricted/boost/container/src/dlmalloc_ext_2_8_6.c
index 3328d729751..2bdd3afdf73 100644
--- a/contrib/restricted/boost/container/src/dlmalloc_ext_2_8_6.c
+++ b/contrib/restricted/boost/container/src/dlmalloc_ext_2_8_6.c
@@ -19,6 +19,8 @@
 #define MSPACES 1
 #define NO_MALLINFO 1
 #define NO_MALLOC_STATS 1
+//disable sbrk as it's deprecated in some systems and weakens ASLR
+#define HAVE_MORECORE 0
 
 #if !defined(NDEBUG)
@@ -33,7 +35,6 @@
 #ifdef __GNUC__
 #define FORCEINLINE inline
 #endif
-#include "dlmalloc_2_8_6.c"
 
 #ifdef _MSC_VER
 #pragma warning (push)
@@ -43,8 +44,13 @@
 #pragma warning (disable : 4702)
 #pragma warning (disable : 4390) /*empty controlled statement found; is this the intent?*/
 #pragma warning (disable : 4251 4231 4660) /*dll warnings*/
+#pragma warning (disable : 4057) /*differs in indirection to slightly different base types from*/
+#pragma warning (disable : 4702) /*unreachable code*/
+#pragma warning (disable : 4127) /*conditional expression is constant*/
 #endif
 
+#include "dlmalloc_2_8_6.c"
+
 #define DL_SIZE_IMPL(p) (chunksize(mem2chunk(p)) - overhead_for(mem2chunk(p)))
 
 static size_t s_allocated_memory;
@@ -80,7 +86,6 @@ static void mspace_free_lockless(mspace msp, void* mem)
    if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
       size_t psize = chunksize(p);
       mchunkptr next = chunk_plus_offset(p, psize);
-      s_allocated_memory -= psize;
       if (!pinuse(p)) {
          size_t prevsize = p->prev_foot;
          if (is_mmapped(p)) {
@@ -368,89 +373,6 @@ static mchunkptr try_realloc_chunk_with_min(mstate m, mchunkptr p, size_t min_nb
    return newp;
 }
 
-#define BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(THISMEM, NEXTMEM) \
-   *((void**)(THISMEM)) = *((void**)((NEXTMEM)))
-
-//This function is based on internal_bulk_free
-//replacing iteration over array[] with boost_cont_memchain.
-//Instead of returning the unallocated nodes, returns a chain of non-deallocated nodes.
-//After forward merging, backwards merging is also tried
-static void internal_multialloc_free(mstate m, boost_cont_memchain *pchain)
-{
-#if FOOTERS
-   boost_cont_memchain ret_chain;
-   BOOST_CONTAINER_MEMCHAIN_INIT(&ret_chain);
-#endif
-   if (!PREACTION(m)) {
-      boost_cont_memchain_it a_it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
-      while(!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, a_it)) { /* Iterate though all memory holded by the chain */
-         void* a_mem = BOOST_CONTAINER_MEMIT_ADDR(a_it);
-         mchunkptr a_p = mem2chunk(a_mem);
-         size_t psize = chunksize(a_p);
-#if FOOTERS
-         if (get_mstate_for(a_p) != m) {
-            BOOST_CONTAINER_MEMIT_NEXT(a_it);
-            BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(&ret_chain, a_mem);
-            continue;
-         }
-#endif
-         check_inuse_chunk(m, a_p);
-         if (RTCHECK(ok_address(m, a_p) && ok_inuse(a_p))) {
-            while(1) { /* Internal loop to speed up forward and backward merging (avoids some redundant checks) */
-               boost_cont_memchain_it b_it = a_it;
-               BOOST_CONTAINER_MEMIT_NEXT(b_it);
-               if(!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, b_it)){
-                  void *b_mem = BOOST_CONTAINER_MEMIT_ADDR(b_it);
-                  mchunkptr b_p = mem2chunk(b_mem);
-                  if (b_p == next_chunk(a_p)) { /* b chunk is contiguous and next so b's size can be added to a */
-                     psize += chunksize(b_p);
-                     set_inuse(m, a_p, psize);
-                     BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(a_mem, b_mem);
-                     continue;
-                  }
-                  if(RTCHECK(ok_address(m, b_p) && ok_inuse(b_p))){
-                     /* b chunk is contiguous and previous so a's size can be added to b */
-                     if(a_p == next_chunk(b_p)) {
-                        psize += chunksize(b_p);
-                        set_inuse(m, b_p, psize);
-                        a_it = b_it;
-                        a_p = b_p;
-                        a_mem = b_mem;
-                        continue;
-                     }
-                  }
-               }
-               /* Normal deallocation starts again in the outer loop */
-               a_it = b_it;
-               s_allocated_memory -= psize;
-               dispose_chunk(m, a_p, psize);
-               break;
-            }
-         }
-         else {
-            CORRUPTION_ERROR_ACTION(m);
-            break;
-         }
-      }
-      if (should_trim(m, m->topsize))
-         sys_trim(m, 0);
-      POSTACTION(m);
-   }
-#if FOOTERS
-   {
-      boost_cont_memchain_it last_pchain = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
-      BOOST_CONTAINER_MEMCHAIN_INIT(pchain);
-      BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER
-         (pchain
-         , last_pchain
-         , BOOST_CONTAINER_MEMCHAIN_FIRSTMEM(&ret_chain)
-         , BOOST_CONTAINER_MEMCHAIN_LASTMEM(&ret_chain)
-         , BOOST_CONTAINER_MEMCHAIN_SIZE(&ret_chain)
-         );
-   }
-#endif
-}
-
 ///////////////////////////////////////////////////////////////
 ///////////////////////////////////////////////////////////////
 ///////////////////////////////////////////////////////////////
@@ -840,129 +762,233 @@ static int internal_shrink(mstate m, void* oldmem, size_t minbytes, size_t maxby
          mchunkptr remainder = chunk_plus_offset(oldp, nb);
          set_inuse(m, oldp, nb);
          set_inuse(m, remainder, rsize);
+         s_allocated_memory -= rsize;
         extra = chunk2mem(remainder);
+         mspace_free_lockless(m, extra);
+         check_inuse_chunk(m, oldp);
      }
      *received_size = nb - overhead_for(oldp);
-      if(!do_commit)
-         return 1;
+      return 1;
    }
   }
   else {
      USAGE_ERROR_ACTION(m, oldmem);
-      return 0;
-   }
-
-   if (extra != 0 && do_commit) {
-      mspace_free_lockless(m, extra);
-      check_inuse_chunk(m, oldp);
-      return 1;
-   }
-   else {
-      return 0;
    }
+   return 0;
 }
 
-   #define INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM 4096
-   #define SQRT_MAX_SIZE_T (((size_t)-1)>>(sizeof(size_t)*CHAR_BIT/2))
 static int internal_node_multialloc
-   (mstate m, size_t n_elements, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
-   void* mem;             /* malloced aggregate space */
-   mchunkptr p;           /* corresponding chunk */
-   size_t remainder_size; /* remaining bytes while splitting */
-   flag_t was_enabled;    /* to disable mmap */
-   size_t elements_per_segment = 0;
-   size_t element_req_size = request2size(element_size);
-   boost_cont_memchain_it prev_last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
-
-   /*Error if wrong element_size parameter */
-   if( !element_size ||
-      /*OR Error if n_elements less thatn contiguous_elements */
-      ((contiguous_elements + 1) > (DL_MULTIALLOC_DEFAULT_CONTIGUOUS + 1) && n_elements < contiguous_elements) ||
-      /* OR Error if integer overflow */
-      (SQRT_MAX_SIZE_T < (element_req_size | contiguous_elements) &&
-       (MAX_SIZE_T/element_req_size) < contiguous_elements)){
-      return 0;
-   }
-   switch(contiguous_elements){
-      case DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
-      {
-         /* Default contiguous, just check that we can store at least one element */
-         elements_per_segment = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM/element_req_size;
-         elements_per_segment += (size_t)(!elements_per_segment);
-      }
-      break;
-      case DL_MULTIALLOC_ALL_CONTIGUOUS:
-         /* All elements should be allocated in a single call */
-         elements_per_segment = n_elements;
-      break;
-      default:
-         /* Allocate in chunks of "contiguous_elements" */
-         elements_per_segment = contiguous_elements;
-   }
+(mstate m, size_t n_elements, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
+   void* mem;             /* malloced aggregate space */
+   mchunkptr p;           /* corresponding chunk */
+   size_t remainder_size; /* remaining bytes while splitting */
+   flag_t was_enabled;    /* to disable mmap */
+   size_t elements_per_segment = 0;
+   size_t element_req_size = request2size(element_size);
+   boost_cont_memchain_it prev_last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
+
+   /*Error if wrong element_size parameter */
+   if (!element_size ||
+      /*OR Error if n_elements less than contiguous_elements */
+      ((contiguous_elements + 1) > (BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS + 1) && n_elements < contiguous_elements) ||
+      /* OR Error if integer overflow */
+      (SQRT_MAX_SIZE_T < (element_req_size | contiguous_elements) &&
+       (MAX_SIZE_T / element_req_size) < contiguous_elements)) {
+      return 0;
+   }
+   switch (contiguous_elements) {
+   case BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
+   {
+      /* Default contiguous, just check that we can store at least one element */
+      elements_per_segment = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM / element_req_size;
+      elements_per_segment += (size_t)(!elements_per_segment);
+   }
+   break;
+   case BOOST_CONTAINER_DL_MULTIALLOC_ALL_CONTIGUOUS:
+      /* All elements should be allocated in a single call */
+      elements_per_segment = n_elements;
+      break;
+   default:
+      /* Allocate in chunks of "contiguous_elements" */
+      elements_per_segment = contiguous_elements;
+   }
+
+   {
+      size_t i;
+      size_t next_i;
+      /*
+         Allocate the aggregate chunk. First disable direct-mmapping so
+         malloc won't use it, since we would not be able to later
+         free/realloc space internal to a segregated mmap region.
+      */
+      was_enabled = use_mmap(m);
+      disable_mmap(m);
+      for (i = 0; i != n_elements; i = next_i)
+      {
+         size_t accum_size;
+         size_t n_elements_left = n_elements - i;
+         next_i = i + ((n_elements_left < elements_per_segment) ? n_elements_left : elements_per_segment);
+         accum_size = element_req_size * (next_i - i);
+
+         mem = mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD);
+         if (mem == 0) {
+            BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
+            while (i) {
+               void *addr = BOOST_CONTAINER_MEMIT_ADDR(prev_last_it);
+               --i;
+               BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
+               s_allocated_memory -= chunksize(mem2chunk(addr));
+               mspace_free_lockless(m, addr);
+            }
+            if (was_enabled)
+               enable_mmap(m);
+            return 0;
+         }
+         p = mem2chunk(mem);
+         remainder_size = chunksize(p);
+         s_allocated_memory += remainder_size;
+
+         assert(!is_mmapped(p));
+         {  /* split out elements */
+            //void *mem_orig = mem;
+            //boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
+            size_t num_elements = next_i - i;
+
+            size_t num_loops = num_elements - 1;
+            remainder_size -= element_req_size * num_loops;
+            while (num_loops) {
+               --num_loops;
+               //void **mem_prev = ((void**)mem);
+               set_size_and_pinuse_of_inuse_chunk(m, p, element_req_size);
+               BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(pchain, mem);
+               p = chunk_plus_offset(p, element_req_size);
+               mem = chunk2mem(p);
+               //*mem_prev = mem;
+            }
+            set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
+            BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(pchain, mem);
+            //BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements);
+         }
+      }
+      if (was_enabled)
+         enable_mmap(m);
+   }
+   return 1;
+}
 
-   {
-      size_t i;
-      size_t next_i;
-      /*
-         Allocate the aggregate chunk. First disable direct-mmapping so
-         malloc won't use it, since we would not be able to later
-         free/realloc space internal to a segregated mmap region.
-      */
-      was_enabled = use_mmap(m);
-      disable_mmap(m);
-      for(i = 0; i != n_elements; i = next_i)
-      {
-         size_t accum_size;
-         size_t n_elements_left = n_elements - i;
-         next_i = i + ((n_elements_left < elements_per_segment) ? n_elements_left : elements_per_segment);
-         accum_size = element_req_size*(next_i - i);
-
-         mem = mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD);
-         if (mem == 0){
-            BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
-            while(i--){
-               void *addr = BOOST_CONTAINER_MEMIT_ADDR(prev_last_it);
-               BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
-               mspace_free_lockless(m, addr);
-            }
-            if (was_enabled)
-               enable_mmap(m);
-            return 0;
-         }
-         p = mem2chunk(mem);
-         remainder_size = chunksize(p);
-         s_allocated_memory += remainder_size;
-
-         assert(!is_mmapped(p));
-         { /* split out elements */
-            void *mem_orig = mem;
-            boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
-            size_t num_elements = next_i-i;
-
-            size_t num_loops = num_elements - 1;
-            remainder_size -= element_req_size*num_loops;
-            while(num_loops--){
-               void **mem_prev = ((void**)mem);
-               set_size_and_pinuse_of_inuse_chunk(m, p, element_req_size);
-               p = chunk_plus_offset(p, element_req_size);
-               mem = chunk2mem(p);
-               *mem_prev = mem;
-            }
-            set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
-            BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements);
-         }
-      }
-      if (was_enabled)
-         enable_mmap(m);
-   }
-   return 1;
-
+#define BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC
+#ifndef BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC
+
+#define BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(THISMEM, NEXTMEM) \
+   *((void**)(THISMEM)) = *((void**)((NEXTMEM)))
+
+//This function is based on internal_bulk_free
+//replacing iteration over array[] with boost_cont_memchain.
+//Instead of returning the unallocated nodes, returns a chain of non-deallocated nodes.
+//After forward merging, backwards merging is also tried
+static void internal_multialloc_free(mstate m, boost_cont_memchain *pchain)
+{
+#if FOOTERS
+   boost_cont_memchain ret_chain;
+   BOOST_CONTAINER_MEMCHAIN_INIT(&ret_chain);
+#endif
+   if (!PREACTION(m)) {
+      boost_cont_memchain_it a_it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
+      while (!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, a_it)) { /* Iterate though all memory holded by the chain */
+         void* a_mem = BOOST_CONTAINER_MEMIT_ADDR(a_it);
+         mchunkptr a_p = mem2chunk(a_mem);
+         size_t psize = chunksize(a_p);
+#if FOOTERS
+         if (get_mstate_for(a_p) != m) {
+            BOOST_CONTAINER_MEMIT_NEXT(a_it);
+            BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(&ret_chain, a_mem);
+            continue;
+         }
+#endif
+         check_inuse_chunk(m, a_p);
+         if (RTCHECK(ok_address(m, a_p) && ok_inuse(a_p))) {
+            while (1) { /* Internal loop to speed up forward and backward merging (avoids some redundant checks) */
+               boost_cont_memchain_it b_it = a_it;
+               BOOST_CONTAINER_MEMIT_NEXT(b_it);
+               if (!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, b_it)) {
+                  void *b_mem = BOOST_CONTAINER_MEMIT_ADDR(b_it);
+                  mchunkptr b_p = mem2chunk(b_mem);
+                  if (b_p == next_chunk(a_p)) { /* b chunk is contiguous and next so b's size can be added to a */
+                     psize += chunksize(b_p);
+                     set_inuse(m, a_p, psize);
+                     BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(a_mem, b_mem);
+                     continue;
+                  }
+                  if (RTCHECK(ok_address(m, b_p) && ok_inuse(b_p))) {
+                     /* b chunk is contiguous and previous so a's size can be added to b */
+                     if (a_p == next_chunk(b_p)) {
+                        psize += chunksize(b_p);
+                        set_inuse(m, b_p, psize);
+                        a_it = b_it;
+                        a_p = b_p;
+                        a_mem = b_mem;
+                        continue;
+                     }
+                  }
+               }
+               /* Normal deallocation starts again in the outer loop */
+               a_it = b_it;
+               s_allocated_memory -= psize;
+               dispose_chunk(m, a_p, psize);
+               break;
+            }
+         }
+         else {
+            CORRUPTION_ERROR_ACTION(m);
+            break;
+         }
+      }
+      if (should_trim(m, m->topsize))
+         sys_trim(m, 0);
+      POSTACTION(m);
+   }
+#if FOOTERS
+   {
+      boost_cont_memchain_it last_pchain = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
+      BOOST_CONTAINER_MEMCHAIN_INIT(pchain);
+      BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER
+         (pchain
+         , last_pchain
+         , BOOST_CONTAINER_MEMCHAIN_FIRSTMEM(&ret_chain)
+         , BOOST_CONTAINER_MEMCHAIN_LASTMEM(&ret_chain)
+         , BOOST_CONTAINER_MEMCHAIN_SIZE(&ret_chain)
+         );
+   }
+#endif
+}
+
+#else //BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC
+
+//This function is based on internal_bulk_free
+//replacing iteration over array[] with boost_cont_memchain.
+//Instead of returning the unallocated nodes, returns a chain of non-deallocated nodes.
+//After forward merging, backwards merging is also tried
+static void internal_multialloc_free(mstate m, boost_cont_memchain *pchain)
+{
+   if (!PREACTION(m)) {
+      boost_cont_memchain_it a_it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
+      while (!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, a_it)) { /* Iterate though all memory holded by the chain */
+         void* a_mem = BOOST_CONTAINER_MEMIT_ADDR(a_it);
+         BOOST_CONTAINER_MEMIT_NEXT(a_it);
+         s_allocated_memory -= chunksize(mem2chunk(a_mem));
+         mspace_free_lockless(m, a_mem);
+      }
+      POSTACTION(m);
+   }
 }
+
+#endif   //BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC
+
 static int internal_multialloc_arrays
    (mstate m, size_t n_elements, const size_t* sizes, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
    void* mem;             /* malloced aggregate space */
@@ -980,11 +1006,11 @@ static int internal_multialloc_arrays
       max_size = MAX_REQUEST/element_size;
    /* Different sizes*/
    switch(contiguous_elements){
-      case DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
+      case BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
         /* Use default contiguous mem */
         boost_cont_multialloc_segmented_malloc_size = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM;
      break;
-      case DL_MULTIALLOC_ALL_CONTIGUOUS:
+      case BOOST_CONTAINER_DL_MULTIALLOC_ALL_CONTIGUOUS:
         boost_cont_multialloc_segmented_malloc_size = MAX_REQUEST + CHUNK_OVERHEAD;
      break;
      default:
@@ -1036,6 +1062,7 @@ static int internal_multialloc_arrays
            while(i--){
               void *addr = BOOST_CONTAINER_MEMIT_ADDR(it);
               BOOST_CONTAINER_MEMIT_NEXT(it);
+               s_allocated_memory -= chunksize(mem2chunk(addr));
               mspace_free_lockless(m, addr);
            }
            if (was_enabled)
@@ -1143,6 +1170,8 @@ void boost_cont_free(void* mem)
      USAGE_ERROR_ACTION(ms,ms);
   }
   else if (!PREACTION(ms)) {
+      if(mem)
+         s_allocated_memory -= chunksize(mem2chunk(mem));
      mspace_free_lockless(ms, mem);
      POSTACTION(ms);
   }
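A pattern recurs through this whole file: the s_allocated_memory bookkeeping moves out of mspace_free_lockless and into each caller (boost_cont_free, internal_shrink, the multialloc rollback paths), since the caller already knows the chunk size it is releasing. A self-contained model of that caller-side accounting, using hypothetical names (counted_malloc, counted_free, a size map) standing in for the real chunk headers and mspace routines:

// Models the caller-side accounting the patch adopts: the low-level
// free never touches the counter; every caller subtracts the block
// size first, exactly once.
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <unordered_map>

static std::size_t total_allocated = 0;                // models s_allocated_memory
static std::unordered_map<void*, std::size_t> sizes;   // models chunk headers

void* counted_malloc(std::size_t n) {
    void* p = std::malloc(n);
    if (p) { sizes[p] = n; total_allocated += n; }
    return p;
}

void counted_free(void* p) {
    if (!p) return;
    total_allocated -= sizes[p];  // caller adjusts, as in boost_cont_free
    sizes.erase(p);
    std::free(p);                 // the raw free stays accounting-free
}

int main() {
    void* a = counted_malloc(32);
    void* b = counted_malloc(64);
    counted_free(a);
    assert(total_allocated == 64);
    counted_free(b);
    assert(total_allocated == 0);
    return 0;
}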
diff --git a/contrib/restricted/boost/container/src/global_resource.cpp b/contrib/restricted/boost/container/src/global_resource.cpp
index 15f4fe404cf..69a91c3cf02 100644
--- a/contrib/restricted/boost/container/src/global_resource.cpp
+++ b/contrib/restricted/boost/container/src/global_resource.cpp
@@ -10,10 +10,11 @@
 #define BOOST_CONTAINER_SOURCE
 #include <boost/container/pmr/memory_resource.hpp>
-
+#include <boost/container/pmr/global_resource.hpp>
 #include <boost/core/no_exceptions_support.hpp>
 #include <boost/container/throw_exception.hpp>
 #include <boost/container/detail/dlmalloc.hpp> //For global lock
+#include <boost/container/detail/singleton.hpp>
 #include <cstddef>
 #include <new>
@@ -27,58 +28,68 @@ class new_delete_resource_imp
 {
    public:
 
-   virtual ~new_delete_resource_imp()
+   ~new_delete_resource_imp() BOOST_OVERRIDE
    {}
 
-   virtual void* do_allocate(std::size_t bytes, std::size_t alignment)
+   void* do_allocate(std::size_t bytes, std::size_t alignment) BOOST_OVERRIDE
    {  (void)bytes; (void)alignment;  return new char[bytes];  }
 
-   virtual void do_deallocate(void* p, std::size_t bytes, std::size_t alignment)
+   void do_deallocate(void* p, std::size_t bytes, std::size_t alignment) BOOST_OVERRIDE
    {  (void)bytes; (void)alignment;  delete[]((char*)p);  }
 
-   virtual bool do_is_equal(const memory_resource& other) const BOOST_NOEXCEPT
+   bool do_is_equal(const memory_resource& other) const BOOST_NOEXCEPT BOOST_OVERRIDE
    {  return &other == this;  }
-} new_delete_resource_instance;
+};
 
 struct null_memory_resource_imp
    : public memory_resource
 {
    public:
 
-   virtual ~null_memory_resource_imp()
+   ~null_memory_resource_imp() BOOST_OVERRIDE
    {}
 
-   virtual void* do_allocate(std::size_t bytes, std::size_t alignment)
+   void* do_allocate(std::size_t bytes, std::size_t alignment) BOOST_OVERRIDE
    {
      (void)bytes; (void)alignment;
+      #if defined(BOOST_CONTAINER_USER_DEFINED_THROW_CALLBACKS) || defined(BOOST_NO_EXCEPTIONS)
      throw_bad_alloc();
      return 0;
+      #else
+      throw std::bad_alloc();
+      #endif
   }
 
-   virtual void do_deallocate(void* p, std::size_t bytes, std::size_t alignment)
+   void do_deallocate(void* p, std::size_t bytes, std::size_t alignment) BOOST_OVERRIDE
    {  (void)p; (void)bytes; (void)alignment;  }
 
-   virtual bool do_is_equal(const memory_resource& other) const BOOST_NOEXCEPT
+   bool do_is_equal(const memory_resource& other) const BOOST_NOEXCEPT BOOST_OVERRIDE
    {  return &other == this;  }
-} null_memory_resource_instance;
+};
 
 BOOST_CONTAINER_DECL memory_resource* new_delete_resource() BOOST_NOEXCEPT
 {
-   return &new_delete_resource_instance;
+   return &boost::container::dtl::singleton_default<new_delete_resource_imp>::instance();
 }
 
 BOOST_CONTAINER_DECL memory_resource* null_memory_resource() BOOST_NOEXCEPT
 {
-   return &null_memory_resource_instance;
+   return &boost::container::dtl::singleton_default<null_memory_resource_imp>::instance();
 }
 
-static memory_resource *default_memory_resource = &new_delete_resource_instance;
+#if 1
+
+static memory_resource *default_memory_resource =
+   &boost::container::dtl::singleton_default<new_delete_resource_imp>::instance();
 
 BOOST_CONTAINER_DECL memory_resource* set_default_resource(memory_resource* r) BOOST_NOEXCEPT
 {
-   //TO-DO: synchronizes-with part using atomics
    if(dlmalloc_global_sync_lock()){
      memory_resource *previous = default_memory_resource;
+      if(!previous){
+         //function called before main, default_memory_resource is not initialized yet
+         previous = new_delete_resource();
+      }
      default_memory_resource = r ? r : new_delete_resource();
      dlmalloc_global_sync_unlock();
      return previous;
@@ -90,9 +101,12 @@ BOOST_CONTAINER_DECL memory_resource* set_default_resource(memory_resource* r) B
 
 BOOST_CONTAINER_DECL memory_resource* get_default_resource() BOOST_NOEXCEPT
 {
-   //TO-DO: synchronizes-with part using atomics
    if(dlmalloc_global_sync_lock()){
      memory_resource *current = default_memory_resource;
+      if(!current){
+         //function called before main, default_memory_resource is not initialized yet
+         current = new_delete_resource();
+      }
      dlmalloc_global_sync_unlock();
      return current;
   }
@@ -101,6 +115,32 @@ BOOST_CONTAINER_DECL memory_resource* get_default_resource() BOOST_NOEXCEPT
    }
 }
 
+#else // #if defined(BOOST_NO_CXX11_HDR_ATOMIC)
+
+} //namespace pmr {
+} //namespace container {
+} //namespace boost {
+
+#include <atomic>
+
+namespace boost {
+namespace container {
+namespace pmr {
+
+static std::atomic<memory_resource*> default_memory_resource =
+   ATOMIC_VAR_INIT(&boost::container::dtl::singleton_default<new_delete_resource_imp>::instance());
+
+BOOST_CONTAINER_DECL memory_resource* set_default_resource(memory_resource* r) BOOST_NOEXCEPT
+{
+   memory_resource *const res = r ? r : new_delete_resource();
+   return default_memory_resource.exchange(res, std::memory_order_acq_rel);
+}
+
+BOOST_CONTAINER_DECL memory_resource* get_default_resource() BOOST_NOEXCEPT
+{  return default_memory_resource.load(std::memory_order_acquire);  }
+
+#endif
+
 } //namespace pmr {
 } //namespace container {
 } //namespace boost {
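This file keeps the lock-based implementation (the #if 1 branch) and leaves a C++11 std::atomic alternative disabled in the #else branch; the new null checks make both accessors usable even before static initialization has run. A short usage sketch of the public API this file implements (set_default_resource, get_default_resource, new_delete_resource and null_memory_resource are the real boost::container::pmr entry points):

// Swapping the process-wide default memory resource and restoring it.
#include <boost/container/pmr/global_resource.hpp>
#include <boost/container/pmr/memory_resource.hpp>

int main() {
    namespace pmr = boost::container::pmr;

    // Install a resource that refuses every allocation; the previous
    // default is returned so it can be restored afterwards.
    pmr::memory_resource* prev = pmr::set_default_resource(pmr::null_memory_resource());
    // ... code that must not allocate through the default resource ...
    pmr::set_default_resource(prev);

    // At program start the default is the new/delete-backed resource.
    return pmr::get_default_resource() == pmr::new_delete_resource() ? 0 : 1;
}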
diff --git a/contrib/restricted/boost/container/src/monotonic_buffer_resource.cpp b/contrib/restricted/boost/container/src/monotonic_buffer_resource.cpp
index f9f6f4cbe5e..5507ef2d56f 100644
--- a/contrib/restricted/boost/container/src/monotonic_buffer_resource.cpp
+++ b/contrib/restricted/boost/container/src/monotonic_buffer_resource.cpp
@@ -18,6 +18,7 @@
 #include <boost/container/detail/min_max.hpp>
 #include <boost/intrusive/detail/math.hpp>
 #include <boost/container/throw_exception.hpp>
+#include <new>
 
 #include <cstddef>
 
@@ -63,6 +64,8 @@ monotonic_buffer_resource::monotonic_buffer_resource(memory_resource* upstream)
    , m_current_buffer(0)
    , m_current_buffer_size(0u)
    , m_next_buffer_size(initial_next_buffer_size)
+   , m_initial_buffer(0)
+   , m_initial_buffer_size(0u)
 {}
 
 monotonic_buffer_resource::monotonic_buffer_resource(std::size_t initial_size, memory_resource* upstream) BOOST_NOEXCEPT
@@ -70,6 +73,8 @@ monotonic_buffer_resource::monotonic_buffer_resource(std::size_t initial_size, m
    , m_current_buffer(0)
    , m_current_buffer_size(0u)
    , m_next_buffer_size(minimum_buffer_size)
+   , m_initial_buffer(0)
+   , m_initial_buffer_size(0u)
 {  //In case initial_size is zero
    this->increase_next_buffer_at_least_to(initial_size + !initial_size);  }
 
@@ -81,6 +86,8 @@ monotonic_buffer_resource::monotonic_buffer_resource(void* buffer, std::size_t b
    , m_next_buffer_size
      (bi::detail::previous_or_equal_pow2
        (boost::container::dtl::max_value(buffer_size, std::size_t(initial_next_buffer_size))))
+   , m_initial_buffer(buffer)
+   , m_initial_buffer_size(buffer_size)
 {  this->increase_next_buffer();  }
 
 monotonic_buffer_resource::~monotonic_buffer_resource()
@@ -89,8 +96,8 @@ monotonic_buffer_resource::~monotonic_buffer_resource()
 void monotonic_buffer_resource::release() BOOST_NOEXCEPT
 {
    m_memory_blocks.release();
-   m_current_buffer = 0u;
-   m_current_buffer_size = 0u;
+   m_current_buffer = m_initial_buffer;
+   m_current_buffer_size = m_initial_buffer_size;
    m_next_buffer_size = initial_next_buffer_size;
 }
 
@@ -129,12 +136,21 @@ void *monotonic_buffer_resource::allocate_from_current(std::size_t aligner, std:
 
 void* monotonic_buffer_resource::do_allocate(std::size_t bytes, std::size_t alignment)
 {
-   if(alignment > memory_resource::max_align)
+   if(alignment > memory_resource::max_align){
+      (void)bytes; (void)alignment;
+      #if defined(BOOST_CONTAINER_USER_DEFINED_THROW_CALLBACKS) || defined(BOOST_NO_EXCEPTIONS)
      throw_bad_alloc();
+      #else
+      throw std::bad_alloc();
+      #endif
+   }
 
    //See if there is room in current buffer
    std::size_t aligner = 0u;
    if(this->remaining_storage(alignment, aligner) < bytes){
+      //The new buffer will be aligned to the strictest alignment so reset
+      //the aligner, which was needed for the old buffer.
+      aligner = 0u;
      //Update next_buffer_size to at least bytes
      this->increase_next_buffer_at_least_to(bytes);
      //Now allocate and update internal data
@@ -150,7 +166,7 @@ void monotonic_buffer_resource::do_deallocate(void* p, std::size_t bytes, std::s
 {  (void)p; (void)bytes; (void)alignment;  }
 
 bool monotonic_buffer_resource::do_is_equal(const memory_resource& other) const BOOST_NOEXCEPT
-{  return this == dynamic_cast<const monotonic_buffer_resource*>(&other);  }
+{  return this == &other;  }
 
 } //namespace pmr {
 } //namespace container {
diff --git a/contrib/restricted/boost/container/src/pool_resource.cpp b/contrib/restricted/boost/container/src/pool_resource.cpp
index e6829e28e7b..82105a4520c 100644
--- a/contrib/restricted/boost/container/src/pool_resource.cpp
+++ b/contrib/restricted/boost/container/src/pool_resource.cpp
@@ -176,7 +176,7 @@ pool_resource::pool_resource(const pool_options& opts) BOOST_NOEXCEPT
    : m_options(opts), m_upstream(*get_default_resource()), m_oversized_list(), m_pool_data(), m_pool_count()
 {  this->priv_constructor_body();  }
 
-pool_resource::~pool_resource() //virtual
+pool_resource::~pool_resource()
 {
    this->release();
@@ -203,7 +203,7 @@ memory_resource* pool_resource::upstream_resource() const
 pool_options pool_resource::options() const
 {  return m_options;  }
 
-void* pool_resource::do_allocate(std::size_t bytes, std::size_t alignment) //virtual
+void* pool_resource::do_allocate(std::size_t bytes, std::size_t alignment)
 {
    if(!m_pool_data){
      this->priv_init_pools();
@@ -224,7 +224,7 @@ void* pool_resource::do_allocate(std::size_t bytes, std::size_t alignment) //vir
    }
 }
 
-void pool_resource::do_deallocate(void* p, std::size_t bytes, std::size_t alignment) //virtual
+void pool_resource::do_deallocate(void* p, std::size_t bytes, std::size_t alignment)
 {
    (void)alignment;  //alignment ignored here, max_align is used by pools
    if(bytes > m_options.largest_required_pool_block){
@@ -237,10 +237,6 @@ void pool_resource::do_deallocate(void* p, std::size_t bytes, std::size_t alignm
    }
 }
 
-bool pool_resource::do_is_equal(const memory_resource& other) const BOOST_NOEXCEPT //virtual
-{  return this == dynamic_cast<const pool_resource*>(&other);  }
-
-
 std::size_t pool_resource::pool_count() const
 {
    if(BOOST_LIKELY((0 != m_pool_data))){
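The monotonic_buffer_resource hunks make release() remember a user-supplied initial buffer instead of discarding it. A sketch of the behavior this enables, assuming the patched semantics (the assert expresses the intent of the fix, not a documented guarantee):

// After release(), small allocations are served again from the
// original caller-provided buffer. Uses the real boost API.
#include <boost/container/pmr/monotonic_buffer_resource.hpp>
#include <cassert>

int main() {
    char stack_buf[256];
    boost::container::pmr::monotonic_buffer_resource mr(stack_buf, sizeof(stack_buf));

    void* p1 = mr.allocate(64, alignof(double));
    (void)p1;
    mr.release();  // patched: resets the current buffer to stack_buf

    char* p2 = static_cast<char*>(mr.allocate(64, alignof(double)));
    assert(p2 >= stack_buf && p2 < stack_buf + sizeof(stack_buf));
    return 0;
}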
diff --git a/contrib/restricted/boost/container/src/synchronized_pool_resource.cpp b/contrib/restricted/boost/container/src/synchronized_pool_resource.cpp
index b98bed4f638..21c40a9e5b8 100644
--- a/contrib/restricted/boost/container/src/synchronized_pool_resource.cpp
+++ b/contrib/restricted/boost/container/src/synchronized_pool_resource.cpp
@@ -11,31 +11,29 @@
 #define BOOST_CONTAINER_SOURCE
 #include <boost/container/detail/config_begin.hpp>
 #include <boost/container/detail/workaround.hpp>
-#include <boost/container/detail/dlmalloc.hpp>
+#include <boost/container/detail/thread_mutex.hpp>
 #include <boost/container/pmr/synchronized_pool_resource.hpp>
 #include <cstddef>
 
 namespace {
 
-using namespace boost::container;
+using namespace boost::container::dtl;
 
-class dlmalloc_sync_scoped_lock
+class thread_mutex_lock
 {
-   void *m_sync;
+   thread_mutex &m_mut;
 
    public:
-   explicit dlmalloc_sync_scoped_lock(void *sync)
-      : m_sync(sync)
+   explicit thread_mutex_lock(thread_mutex &m)
+      : m_mut(m)
    {
-      if(!dlmalloc_sync_lock(m_sync)){
-         throw_bad_alloc();
-      }
+      m_mut.lock();
    }
 
-   ~dlmalloc_sync_scoped_lock()
+   ~thread_mutex_lock()
    {
-      dlmalloc_sync_unlock(m_sync);
+      m_mut.unlock();
    }
 };
 
@@ -46,32 +44,28 @@ namespace container {
 namespace pmr {
 
 synchronized_pool_resource::synchronized_pool_resource(const pool_options& opts, memory_resource* upstream) BOOST_NOEXCEPT
-   : m_pool_resource(opts, upstream), m_opaque_sync()
+   : m_mut(), m_pool_resource(opts, upstream)
 {}
 
 synchronized_pool_resource::synchronized_pool_resource() BOOST_NOEXCEPT
-   : m_pool_resource(), m_opaque_sync()
+   : m_mut(), m_pool_resource()
 {}
 
 synchronized_pool_resource::synchronized_pool_resource(memory_resource* upstream) BOOST_NOEXCEPT
-   : m_pool_resource(upstream), m_opaque_sync()
+   : m_mut(), m_pool_resource(upstream)
 {}
 
 synchronized_pool_resource::synchronized_pool_resource(const pool_options& opts) BOOST_NOEXCEPT
-   : m_pool_resource(opts), m_opaque_sync()
+   : m_mut(), m_pool_resource(opts)
 {}
 
 synchronized_pool_resource::~synchronized_pool_resource() //virtual
-{
-   if(m_opaque_sync)
-      dlmalloc_sync_destroy(m_opaque_sync);
-}
+{}
 
 void synchronized_pool_resource::release()
 {
-   if(m_opaque_sync){ //If there is no mutex, no allocation could be done
-      m_pool_resource.release();
-   }
+   thread_mutex_lock lck(m_mut); (void)lck;
+   m_pool_resource.release();
 }
 
 memory_resource* synchronized_pool_resource::upstream_resource() const
@@ -82,24 +76,18 @@ pool_options synchronized_pool_resource::options() const
 
 void* synchronized_pool_resource::do_allocate(std::size_t bytes, std::size_t alignment) //virtual
 {
-   if(!m_opaque_sync){ //If there is no mutex, no allocation could be done
-      m_opaque_sync = dlmalloc_sync_create();
-      if(!m_opaque_sync){
-         throw_bad_alloc();
-      }
-   }
-   dlmalloc_sync_scoped_lock lock(m_opaque_sync); (void)lock;
+   thread_mutex_lock lck(m_mut); (void)lck;
    return m_pool_resource.do_allocate(bytes, alignment);
 }
 
 void synchronized_pool_resource::do_deallocate(void* p, std::size_t bytes, std::size_t alignment) //virtual
 {
-   dlmalloc_sync_scoped_lock lock(m_opaque_sync); (void)lock;
+   thread_mutex_lock lck(m_mut); (void)lck;
    return m_pool_resource.do_deallocate(p, bytes, alignment);
 }
 
 bool synchronized_pool_resource::do_is_equal(const memory_resource& other) const BOOST_NOEXCEPT //virtual
-{  return this == dynamic_cast<const synchronized_pool_resource*>(&other);  }
+{  return this == &other;  }
 
 std::size_t synchronized_pool_resource::pool_count() const
 {  return m_pool_resource.pool_count();  }
diff --git a/contrib/restricted/boost/container/src/unsynchronized_pool_resource.cpp b/contrib/restricted/boost/container/src/unsynchronized_pool_resource.cpp
index 0c84f694a33..9ce7fef9b60 100644
--- a/contrib/restricted/boost/container/src/unsynchronized_pool_resource.cpp
+++ b/contrib/restricted/boost/container/src/unsynchronized_pool_resource.cpp
@@ -55,7 +55,7 @@ void unsynchronized_pool_resource::do_deallocate(void* p, std::size_t bytes, std
 {  return m_resource.do_deallocate(p, bytes, alignment);  }
 
 bool unsynchronized_pool_resource::do_is_equal(const memory_resource& other) const BOOST_NOEXCEPT //virtual
-{  return this == dynamic_cast<const unsynchronized_pool_resource*>(&other);  }
+{  return this == &other;  }
 
 std::size_t unsynchronized_pool_resource::pool_count() const
 {  return m_resource.pool_count();  }
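Two ideas close out the patch. First, synchronized_pool_resource now owns a dtl::thread_mutex member guarded by a small RAII class instead of lazily creating a dlmalloc sync object, so locking can no longer fail or allocate. Second, do_is_equal across these resources becomes a plain address comparison: two distinct stateful pool resources are never interchangeable, so object identity is the right test and the dynamic_cast was unnecessary. The same RAII locking pattern, expressed against std::mutex purely for illustration (the patch uses boost::container::dtl::thread_mutex, presumably to avoid requiring C++11 <mutex>):

// Minimal RAII lock mirroring thread_mutex_lock in the diff above.
#include <mutex>

class scoped_lock_sketch {
    std::mutex& m_mut;
public:
    explicit scoped_lock_sketch(std::mutex& m) : m_mut(m) { m_mut.lock(); }
    ~scoped_lock_sketch() { m_mut.unlock(); }
    // Non-copyable: copying a lock guard would double-unlock.
    scoped_lock_sketch(const scoped_lock_sketch&) = delete;
    scoped_lock_sketch& operator=(const scoped_lock_sketch&) = delete;
};

std::mutex mut;
int shared_value = 0;

void bump() {
    scoped_lock_sketch lck(mut);  // held for the whole scope
    ++shared_value;
}                                 // released here, even on exceptions

int main() { bump(); return shared_value == 1 ? 0 : 1; }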