| author | rdna <rdna@yandex-team.ru> | 2022-02-10 16:48:05 +0300 |
|---|---|---|
| committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:48:05 +0300 |
| commit | 7804d69d166cc162c0be19dafd698a6ad7e42b25 (patch) | |
| tree | 1a5e99bcef6e3f18d115f0a34d227d14178b6ce8 | /contrib/libs/jemalloc |
| parent | 10ade5dcb952a8fae61f734485641a8409e1c545 (diff) | |
| download | ydb-7804d69d166cc162c0be19dafd698a6ad7e42b25.tar.gz | |
Restoring authorship annotation for <rdna@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/jemalloc')
36 files changed, 5862 insertions, 5862 deletions
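All 36 files sit under contrib/libs/jemalloc, and the insertion and deletion counts are identical; the hunks below appear to differ only in trailing whitespace, which is consistent with a commit that rewrites line attribution without changing content. Something like `git diff -w 10ade5dcb952 7804d69d166c -- contrib/libs/jemalloc` should confirm this, since `-w` ignores whitespace-only changes.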
diff --git a/contrib/libs/jemalloc/COPYING b/contrib/libs/jemalloc/COPYING index 3b7fd3585d..f228b01dd4 100644 --- a/contrib/libs/jemalloc/COPYING +++ b/contrib/libs/jemalloc/COPYING @@ -1,27 +1,27 @@ -Unless otherwise specified, files in the jemalloc source distribution are -subject to the following license: --------------------------------------------------------------------------------- +Unless otherwise specified, files in the jemalloc source distribution are +subject to the following license: +-------------------------------------------------------------------------------- Copyright (C) 2002-present Jason Evans <jasone@canonware.com>. -All rights reserved. -Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. +All rights reserved. +Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. Copyright (C) 2009-present Facebook, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright notice(s), - this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice(s), - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright notice(s), + this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice(s), + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+-------------------------------------------------------------------------------- diff --git a/contrib/libs/jemalloc/README b/contrib/libs/jemalloc/README index 3a6e0d2725..052b4dc385 100644 --- a/contrib/libs/jemalloc/README +++ b/contrib/libs/jemalloc/README @@ -1,20 +1,20 @@ -jemalloc is a general purpose malloc(3) implementation that emphasizes -fragmentation avoidance and scalable concurrency support. jemalloc first came -into use as the FreeBSD libc allocator in 2005, and since then it has found its -way into numerous applications that rely on its predictable behavior. In 2010 -jemalloc development efforts broadened to include developer support features +jemalloc is a general purpose malloc(3) implementation that emphasizes +fragmentation avoidance and scalable concurrency support. jemalloc first came +into use as the FreeBSD libc allocator in 2005, and since then it has found its +way into numerous applications that rely on its predictable behavior. In 2010 +jemalloc development efforts broadened to include developer support features such as heap profiling and extensive monitoring/tuning hooks. Modern jemalloc releases continue to be integrated back into FreeBSD, and therefore versatility remains critical. Ongoing development efforts trend toward making jemalloc among the best allocators for a broad range of demanding applications, and eliminating/mitigating weaknesses that have practical repercussions for real world applications. - -The COPYING file contains copyright and licensing information. - -The INSTALL file contains information on how to configure, build, and install -jemalloc. - -The ChangeLog file contains a brief summary of changes for each release. - + +The COPYING file contains copyright and licensing information. + +The INSTALL file contains information on how to configure, build, and install +jemalloc. + +The ChangeLog file contains a brief summary of changes for each release. + URL: http://jemalloc.net/ diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/atomic.h b/contrib/libs/jemalloc/include/jemalloc/internal/atomic.h index cc254c56ac..827098e7f8 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/atomic.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/atomic.h @@ -1,8 +1,8 @@ #ifndef JEMALLOC_INTERNAL_ATOMIC_H #define JEMALLOC_INTERNAL_ATOMIC_H - + #define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE - + #define JEMALLOC_U8_ATOMICS #if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) # include "jemalloc/internal/atomic_gcc_atomic.h" @@ -21,7 +21,7 @@ #else # error "Don't have atomics implemented on this platform." #endif - + /* * This header gives more or less a backport of C11 atomics. The user can write * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate @@ -40,7 +40,7 @@ * atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence). * ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT). */ - + /* * Pure convenience, so that we don't have to type "atomic_memory_order_" * quite so often. @@ -50,37 +50,37 @@ #define ATOMIC_RELEASE atomic_memory_order_release #define ATOMIC_ACQ_REL atomic_memory_order_acq_rel #define ATOMIC_SEQ_CST atomic_memory_order_seq_cst - + /* * Not all platforms have 64-bit atomics. If we do, this #define exposes that * fact. 
*/ -#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) +#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) # define JEMALLOC_ATOMIC_U64 -#endif - +#endif + JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR) - + /* * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only * platform that actually needs to know the size, MSVC. */ JEMALLOC_GENERATE_ATOMICS(bool, b, 0) - + JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT) - + JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR) - + JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR) - + JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0) - + JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2) - + #ifdef JEMALLOC_ATOMIC_U64 JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3) -#endif - +#endif + #undef ATOMIC_INLINE - + #endif /* JEMALLOC_INTERNAL_ATOMIC_H */ diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/bitmap.h b/contrib/libs/jemalloc/include/jemalloc/internal/bitmap.h index c3f9cb490f..fd14cda060 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/bitmap.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/bitmap.h @@ -1,13 +1,13 @@ #ifndef JEMALLOC_INTERNAL_BITMAP_H #define JEMALLOC_INTERNAL_BITMAP_H - + #include "jemalloc/internal/arena_types.h" #include "jemalloc/internal/bit_util.h" #include "jemalloc/internal/sc.h" - -typedef unsigned long bitmap_t; + +typedef unsigned long bitmap_t; #define LG_SIZEOF_BITMAP LG_SIZEOF_LONG - + /* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ #if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES) /* Maximum bitmap bit count is determined by maximum regions per slab. */ @@ -18,11 +18,11 @@ typedef unsigned long bitmap_t; #endif #define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) -/* Number of bits per group. */ +/* Number of bits per group. */ #define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) #define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS) #define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) - + /* * Do some analysis on how big the bitmap is before we use a tree. For a brute * force linear search, if we would have to call ffs_lu() more than 2^3 times, @@ -31,11 +31,11 @@ typedef unsigned long bitmap_t; #if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 # define BITMAP_USE_TREE #endif - + /* Number of groups required to store a given number of bits. */ #define BITMAP_BITS2GROUPS(nbits) \ (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) - + /* * Number of groups required at a particular level for a given number of bits. */ @@ -145,40 +145,40 @@ typedef unsigned long bitmap_t; #endif /* BITMAP_USE_TREE */ typedef struct bitmap_level_s { - /* Offset of this level's groups within the array of groups. */ - size_t group_offset; + /* Offset of this level's groups within the array of groups. */ + size_t group_offset; } bitmap_level_t; - + typedef struct bitmap_info_s { - /* Logical number of bits in bitmap (stored at bottom level). */ - size_t nbits; - + /* Logical number of bits in bitmap (stored at bottom level). */ + size_t nbits; + #ifdef BITMAP_USE_TREE - /* Number of levels necessary for nbits. */ - unsigned nlevels; - - /* - * Only the first (nlevels+1) elements are used, and levels are ordered - * bottom to top (e.g. the bottom level is stored in levels[0]). - */ - bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; + /* Number of levels necessary for nbits. */ + unsigned nlevels; + + /* + * Only the first (nlevels+1) elements are used, and levels are ordered + * bottom to top (e.g. the bottom level is stored in levels[0]). 
+ */ + bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; #else /* BITMAP_USE_TREE */ /* Number of groups necessary for nbits. */ size_t ngroups; #endif /* BITMAP_USE_TREE */ } bitmap_info_t; - + void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill); size_t bitmap_size(const bitmap_info_t *binfo); - + static inline bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { #ifdef BITMAP_USE_TREE size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; - bitmap_t rg = bitmap[rgoff]; - /* The bitmap is full iff the root group is 0. */ - return (rg == 0); + bitmap_t rg = bitmap[rgoff]; + /* The bitmap is full iff the root group is 0. */ + return (rg == 0); #else size_t i; @@ -189,54 +189,54 @@ bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { } return true; #endif -} - +} + static inline bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { - size_t goff; - bitmap_t g; - - assert(bit < binfo->nbits); - goff = bit >> LG_BITMAP_GROUP_NBITS; - g = bitmap[goff]; + size_t goff; + bitmap_t g; + + assert(bit < binfo->nbits); + goff = bit >> LG_BITMAP_GROUP_NBITS; + g = bitmap[goff]; return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); -} - +} + static inline void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { - size_t goff; - bitmap_t *gp; - bitmap_t g; - - assert(bit < binfo->nbits); + size_t goff; + bitmap_t *gp; + bitmap_t g; + + assert(bit < binfo->nbits); assert(!bitmap_get(bitmap, binfo, bit)); - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[goff]; - g = *gp; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - assert(bitmap_get(bitmap, binfo, bit)); + *gp = g; + assert(bitmap_get(bitmap, binfo, bit)); #ifdef BITMAP_USE_TREE - /* Propagate group state transitions up the tree. */ - if (g == 0) { - unsigned i; - for (i = 1; i < binfo->nlevels; i++) { - bit = goff; - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[binfo->levels[i].group_offset + goff]; - g = *gp; + /* Propagate group state transitions up the tree. */ + if (g == 0) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; + *gp = g; if (g != 0) { - break; + break; } - } - } + } + } #endif -} - +} + /* ffu: find first unset >= bit. */ static inline size_t bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { @@ -296,24 +296,24 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { #endif } -/* sfu: set first unset. */ +/* sfu: set first unset. 
*/ static inline size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { - size_t bit; - bitmap_t g; - unsigned i; - + size_t bit; + bitmap_t g; + unsigned i; + assert(!bitmap_full(bitmap, binfo)); - + #ifdef BITMAP_USE_TREE - i = binfo->nlevels - 1; - g = bitmap[binfo->levels[i].group_offset]; + i = binfo->nlevels - 1; + g = bitmap[binfo->levels[i].group_offset]; bit = ffs_lu(g) - 1; - while (i > 0) { - i--; - g = bitmap[binfo->levels[i].group_offset + bit]; + while (i > 0) { + i--; + g = bitmap[binfo->levels[i].group_offset + bit]; bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); - } + } #else i = 0; g = bitmap[0]; @@ -323,47 +323,47 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { } bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); #endif - bitmap_set(bitmap, binfo, bit); + bitmap_set(bitmap, binfo, bit); return bit; -} - +} + static inline void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { - size_t goff; - bitmap_t *gp; - bitmap_t g; + size_t goff; + bitmap_t *gp; + bitmap_t g; UNUSED bool propagate; - - assert(bit < binfo->nbits); - assert(bitmap_get(bitmap, binfo, bit)); - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[goff]; - g = *gp; - propagate = (g == 0); + + assert(bit < binfo->nbits); + assert(bitmap_get(bitmap, binfo, bit)); + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; + propagate = (g == 0); assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; + *gp = g; assert(!bitmap_get(bitmap, binfo, bit)); #ifdef BITMAP_USE_TREE - /* Propagate group state transitions up the tree. */ - if (propagate) { - unsigned i; - for (i = 1; i < binfo->nlevels; i++) { - bit = goff; - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[binfo->levels[i].group_offset + goff]; - g = *gp; - propagate = (g == 0); + /* Propagate group state transitions up the tree. */ + if (propagate) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; + propagate = (g == 0); assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) - == 0); + == 0); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; + *gp = g; if (!propagate) { - break; + break; } - } - } + } + } #endif /* BITMAP_USE_TREE */ -} - +} + #endif /* JEMALLOC_INTERNAL_BITMAP_H */ diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/ckh.h b/contrib/libs/jemalloc/include/jemalloc/internal/ckh.h index 7b3850bc16..728f64cca6 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/ckh.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/ckh.h @@ -1,74 +1,74 @@ #ifndef JEMALLOC_INTERNAL_CKH_H #define JEMALLOC_INTERNAL_CKH_H - + #include "jemalloc/internal/tsd.h" - + /* Cuckoo hashing implementation. Skip to the end for the interface. */ - + /******************************************************************************/ /* INTERNAL DEFINITIONS -- IGNORE */ /******************************************************************************/ -/* Maintain counters used to get an idea of performance. */ +/* Maintain counters used to get an idea of performance. */ /* #define CKH_COUNT */ -/* Print counter values in ckh_delete() (requires CKH_COUNT). */ +/* Print counter values in ckh_delete() (requires CKH_COUNT). */ /* #define CKH_VERBOSE */ - -/* - * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit - * one bucket per L1 cache line. 
- */ + +/* + * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit + * one bucket per L1 cache line. + */ #define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) - + /* Typedefs to allow easy function pointer passing. */ typedef void ckh_hash_t (const void *, size_t[2]); typedef bool ckh_keycomp_t (const void *, const void *); - -/* Hash table cell. */ + +/* Hash table cell. */ typedef struct { const void *key; const void *data; } ckhc_t; - + /* The hash table itself. */ typedef struct { -#ifdef CKH_COUNT - /* Counters used to get an idea of performance. */ +#ifdef CKH_COUNT + /* Counters used to get an idea of performance. */ uint64_t ngrows; uint64_t nshrinks; uint64_t nshrinkfails; uint64_t ninserts; uint64_t nrelocs; -#endif - - /* Used for pseudo-random number generation. */ +#endif + + /* Used for pseudo-random number generation. */ uint64_t prng_state; - - /* Total number of items. */ + + /* Total number of items. */ size_t count; - - /* - * Minimum and current number of hash table buckets. There are - * 2^LG_CKH_BUCKET_CELLS cells per bucket. - */ + + /* + * Minimum and current number of hash table buckets. There are + * 2^LG_CKH_BUCKET_CELLS cells per bucket. + */ unsigned lg_minbuckets; unsigned lg_curbuckets; - - /* Hash and comparison functions. */ + + /* Hash and comparison functions. */ ckh_hash_t *hash; ckh_keycomp_t *keycomp; - - /* Hash table with 2^lg_curbuckets buckets. */ + + /* Hash table with 2^lg_curbuckets buckets. */ ckhc_t *tab; } ckh_t; - -/******************************************************************************/ + +/******************************************************************************/ /* BEGIN PUBLIC API */ /******************************************************************************/ - + /* Lifetime management. Minitems is the initial capacity. */ bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, - ckh_keycomp_t *keycomp); + ckh_keycomp_t *keycomp); void ckh_delete(tsd_t *tsd, ckh_t *ckh); /* Get the number of elements in the set. */ @@ -89,13 +89,13 @@ bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); */ bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, - void **data); + void **data); bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); - + /* Some useful hash and comparison functions for strings and pointers. */ void ckh_string_hash(const void *key, size_t r_hash[2]); bool ckh_string_keycomp(const void *k1, const void *k2); void ckh_pointer_hash(const void *key, size_t r_hash[2]); bool ckh_pointer_keycomp(const void *k1, const void *k2); - + #endif /* JEMALLOC_INTERNAL_CKH_H */ diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/ctl.h b/contrib/libs/jemalloc/include/jemalloc/internal/ctl.h index 1d1aacc6f4..da113801ca 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/ctl.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/ctl.h @@ -1,52 +1,52 @@ #ifndef JEMALLOC_INTERNAL_CTL_H #define JEMALLOC_INTERNAL_CTL_H - + #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex_prof.h" #include "jemalloc/internal/ql.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/stats.h" - + /* Maximum ctl tree depth. 
*/ #define CTL_MAX_DEPTH 7 - + typedef struct ctl_node_s { bool named; } ctl_node_t; - + typedef struct ctl_named_node_s { ctl_node_t node; const char *name; - /* If (nchildren == 0), this is a terminal node. */ + /* If (nchildren == 0), this is a terminal node. */ size_t nchildren; const ctl_node_t *children; int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *, size_t); } ctl_named_node_t; - + typedef struct ctl_indexed_node_s { struct ctl_node_s node; const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, size_t); } ctl_indexed_node_t; - + typedef struct ctl_arena_stats_s { arena_stats_t astats; - - /* Aggregate stats for small size classes, based on bin stats. */ + + /* Aggregate stats for small size classes, based on bin stats. */ size_t allocated_small; uint64_t nmalloc_small; uint64_t ndalloc_small; uint64_t nrequests_small; uint64_t nfills_small; uint64_t nflushes_small; - + bin_stats_t bstats[SC_NBINS]; arena_stats_large_t lstats[SC_NSIZES - SC_NBINS]; arena_stats_extents_t estats[SC_NPSIZES]; } ctl_arena_stats_t; - + typedef struct ctl_stats_s { size_t allocated; size_t active; @@ -77,13 +77,13 @@ struct ctl_arena_s { /* NULL if !config_stats. */ ctl_arena_stats_t *astats; -}; - +}; + typedef struct ctl_arenas_s { uint64_t epoch; unsigned narenas; ql_head(ctl_arena_t) destroyed; - + /* * Element 0 corresponds to merged stats for extant arenas (accessed via * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for @@ -92,11 +92,11 @@ typedef struct ctl_arenas_s { */ ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT]; } ctl_arenas_t; - + int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, - void *newp, size_t newlen); + void *newp, size_t newlen); int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp); - + int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); bool ctl_boot(void); @@ -105,30 +105,30 @@ void ctl_postfork_parent(tsdn_t *tsdn); void ctl_postfork_child(tsdn_t *tsdn); #define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ - if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ - != 0) { \ - malloc_printf( \ - "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \ - name); \ - abort(); \ - } \ -} while (0) - + if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ + != 0) { \ + malloc_printf( \ + "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \ + name); \ + abort(); \ + } \ +} while (0) + #define xmallctlnametomib(name, mibp, miblenp) do { \ - if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ - malloc_printf("<jemalloc>: Failure in " \ - "xmallctlnametomib(\"%s\", ...)\n", name); \ - abort(); \ - } \ -} while (0) - + if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ + malloc_printf("<jemalloc>: Failure in " \ + "xmallctlnametomib(\"%s\", ...)\n", name); \ + abort(); \ + } \ +} while (0) + #define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ - if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ - newlen) != 0) { \ - malloc_write( \ - "<jemalloc>: Failure in xmallctlbymib()\n"); \ - abort(); \ - } \ -} while (0) - + if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ + newlen) != 0) { \ + malloc_write( \ + "<jemalloc>: Failure in xmallctlbymib()\n"); \ + abort(); \ + } \ +} while (0) + #endif /* JEMALLOC_INTERNAL_CTL_H */ diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/hash.h b/contrib/libs/jemalloc/include/jemalloc/internal/hash.h index 0270034e87..e59be2a4ce 100644 --- 
a/contrib/libs/jemalloc/include/jemalloc/internal/hash.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/hash.h @@ -3,272 +3,272 @@ #include "jemalloc/internal/assert.h" -/* - * The following hash function is based on MurmurHash3, placed into the public +/* + * The following hash function is based on MurmurHash3, placed into the public * domain by Austin Appleby. See https://github.com/aappleby/smhasher for - * details. - */ - -/******************************************************************************/ -/* Internal implementation. */ + * details. + */ + +/******************************************************************************/ +/* Internal implementation. */ static inline uint32_t hash_rotl_32(uint32_t x, int8_t r) { return ((x << r) | (x >> (32 - r))); -} - +} + static inline uint64_t hash_rotl_64(uint64_t x, int8_t r) { return ((x << r) | (x >> (64 - r))); -} - +} + static inline uint32_t hash_get_block_32(const uint32_t *p, int i) { /* Handle unaligned read. */ if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { uint32_t ret; - + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); return ret; } return p[i]; -} - +} + static inline uint64_t hash_get_block_64(const uint64_t *p, int i) { /* Handle unaligned read. */ if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { uint64_t ret; - + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); return ret; } return p[i]; -} - +} + static inline uint32_t hash_fmix_32(uint32_t h) { - h ^= h >> 16; - h *= 0x85ebca6b; - h ^= h >> 13; - h *= 0xc2b2ae35; - h ^= h >> 16; - + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + return h; -} - +} + static inline uint64_t hash_fmix_64(uint64_t k) { - k ^= k >> 33; + k ^= k >> 33; k *= KQU(0xff51afd7ed558ccd); - k ^= k >> 33; + k ^= k >> 33; k *= KQU(0xc4ceb9fe1a85ec53); - k ^= k >> 33; - + k ^= k >> 33; + return k; -} - +} + static inline uint32_t hash_x86_32(const void *key, int len, uint32_t seed) { - const uint8_t *data = (const uint8_t *) key; - const int nblocks = len / 4; - - uint32_t h1 = seed; - - const uint32_t c1 = 0xcc9e2d51; - const uint32_t c2 = 0x1b873593; - - /* body */ - { - const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); - int i; - - for (i = -nblocks; i; i++) { - uint32_t k1 = hash_get_block_32(blocks, i); - - k1 *= c1; - k1 = hash_rotl_32(k1, 15); - k1 *= c2; - - h1 ^= k1; - h1 = hash_rotl_32(h1, 13); - h1 = h1*5 + 0xe6546b64; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t *) (data + nblocks*4); - - uint32_t k1 = 0; - - switch (len & 3) { + const uint8_t *data = (const uint8_t *) key; + const int nblocks = len / 4; + + uint32_t h1 = seed; + + const uint32_t c1 = 0xcc9e2d51; + const uint32_t c2 = 0x1b873593; + + /* body */ + { + const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); + int i; + + for (i = -nblocks; i; i++) { + uint32_t k1 = hash_get_block_32(blocks, i); + + k1 *= c1; + k1 = hash_rotl_32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = hash_rotl_32(h1, 13); + h1 = h1*5 + 0xe6546b64; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t *) (data + nblocks*4); + + uint32_t k1 = 0; + + switch (len & 3) { case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH - case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); - k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; - - h1 = hash_fmix_32(h1); - + case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); + k1 *= c2; h1 ^= k1; + } + } + + /* finalization */ 
+ h1 ^= len; + + h1 = hash_fmix_32(h1); + return h1; -} - +} + static inline void -hash_x86_128(const void *key, const int len, uint32_t seed, +hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]) { - const uint8_t * data = (const uint8_t *) key; - const int nblocks = len / 16; - - uint32_t h1 = seed; - uint32_t h2 = seed; - uint32_t h3 = seed; - uint32_t h4 = seed; - - const uint32_t c1 = 0x239b961b; - const uint32_t c2 = 0xab0e9789; - const uint32_t c3 = 0x38b34ae5; - const uint32_t c4 = 0xa1e38b93; - - /* body */ - { - const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); - int i; - - for (i = -nblocks; i; i++) { - uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); - uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); - uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); - uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); - - k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; - - h1 = hash_rotl_32(h1, 19); h1 += h2; - h1 = h1*5 + 0x561ccd1b; - - k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; - - h2 = hash_rotl_32(h2, 17); h2 += h3; - h2 = h2*5 + 0x0bcaa747; - - k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; - - h3 = hash_rotl_32(h3, 15); h3 += h4; - h3 = h3*5 + 0x96cd1c35; - - k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; - - h4 = hash_rotl_32(h4, 13); h4 += h1; - h4 = h4*5 + 0x32ac3b17; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t *) (data + nblocks*16); - uint32_t k1 = 0; - uint32_t k2 = 0; - uint32_t k3 = 0; - uint32_t k4 = 0; - - switch (len & 15) { + const uint8_t * data = (const uint8_t *) key; + const int nblocks = len / 16; + + uint32_t h1 = seed; + uint32_t h2 = seed; + uint32_t h3 = seed; + uint32_t h4 = seed; + + const uint32_t c1 = 0x239b961b; + const uint32_t c2 = 0xab0e9789; + const uint32_t c3 = 0x38b34ae5; + const uint32_t c4 = 0xa1e38b93; + + /* body */ + { + const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); + int i; + + for (i = -nblocks; i; i++) { + uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); + uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); + uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); + uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); + + k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; + + h1 = hash_rotl_32(h1, 19); h1 += h2; + h1 = h1*5 + 0x561ccd1b; + + k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; + + h2 = hash_rotl_32(h2, 17); h2 += h3; + h2 = h2*5 + 0x0bcaa747; + + k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; + + h3 = hash_rotl_32(h3, 15); h3 += h4; + h3 = h3*5 + 0x96cd1c35; + + k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; + + h4 = hash_rotl_32(h4, 13); h4 += h1; + h4 = h4*5 + 0x32ac3b17; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t *) (data + nblocks*16); + uint32_t k1 = 0; + uint32_t k2 = 0; + uint32_t k3 = 0; + uint32_t k4 = 0; + + switch (len & 15) { case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH - case 13: k4 ^= tail[12] << 0; - k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; + case 13: k4 ^= tail[12] << 0; + k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; JEMALLOC_FALLTHROUGH case 12: k3 ^= tail[11] << 24; JEMALLOC_FALLTHROUGH case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH - case 9: k3 ^= tail[ 8] << 0; - k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; + case 9: k3 ^= tail[ 8] << 0; + k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= 
k3; JEMALLOC_FALLTHROUGH case 8: k2 ^= tail[ 7] << 24; JEMALLOC_FALLTHROUGH case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH - case 5: k2 ^= tail[ 4] << 0; - k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; + case 5: k2 ^= tail[ 4] << 0; + k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; JEMALLOC_FALLTHROUGH case 4: k1 ^= tail[ 3] << 24; JEMALLOC_FALLTHROUGH case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH - case 1: k1 ^= tail[ 0] << 0; - k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; + case 1: k1 ^= tail[ 0] << 0; + k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; JEMALLOC_FALLTHROUGH - } - } - - /* finalization */ - h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; - - h1 += h2; h1 += h3; h1 += h4; - h2 += h1; h3 += h1; h4 += h1; - - h1 = hash_fmix_32(h1); - h2 = hash_fmix_32(h2); - h3 = hash_fmix_32(h3); - h4 = hash_fmix_32(h4); - - h1 += h2; h1 += h3; h1 += h4; - h2 += h1; h3 += h1; h4 += h1; - - r_out[0] = (((uint64_t) h2) << 32) | h1; - r_out[1] = (((uint64_t) h4) << 32) | h3; -} - + } + } + + /* finalization */ + h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + h1 = hash_fmix_32(h1); + h2 = hash_fmix_32(h2); + h3 = hash_fmix_32(h3); + h4 = hash_fmix_32(h4); + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + r_out[0] = (((uint64_t) h2) << 32) | h1; + r_out[1] = (((uint64_t) h4) << 32) | h3; +} + static inline void -hash_x64_128(const void *key, const int len, const uint32_t seed, +hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t r_out[2]) { - const uint8_t *data = (const uint8_t *) key; - const int nblocks = len / 16; - - uint64_t h1 = seed; - uint64_t h2 = seed; - + const uint8_t *data = (const uint8_t *) key; + const int nblocks = len / 16; + + uint64_t h1 = seed; + uint64_t h2 = seed; + const uint64_t c1 = KQU(0x87c37b91114253d5); const uint64_t c2 = KQU(0x4cf5ad432745937f); - - /* body */ - { - const uint64_t *blocks = (const uint64_t *) (data); - int i; - - for (i = 0; i < nblocks; i++) { - uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); - uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); - - k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; - - h1 = hash_rotl_64(h1, 27); h1 += h2; - h1 = h1*5 + 0x52dce729; - - k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; - - h2 = hash_rotl_64(h2, 31); h2 += h1; - h2 = h2*5 + 0x38495ab5; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t*)(data + nblocks*16); - uint64_t k1 = 0; - uint64_t k2 = 0; - - switch (len & 15) { + + /* body */ + { + const uint64_t *blocks = (const uint64_t *) (data); + int i; + + for (i = 0; i < nblocks; i++) { + uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); + uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); + + k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; + + h1 = hash_rotl_64(h1, 27); h1 += h2; + h1 = h1*5 + 0x52dce729; + + k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; + + h2 = hash_rotl_64(h2, 31); h2 += h1; + h2 = h2*5 + 0x38495ab5; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t*)(data + nblocks*16); + uint64_t k1 = 0; + uint64_t k2 = 0; + + switch (len & 15) { case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH case 12: k2 ^= ((uint64_t)(tail[11])) << 
24; JEMALLOC_FALLTHROUGH case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH - case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; - k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; + case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; + k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; JEMALLOC_FALLTHROUGH case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH @@ -277,43 +277,43 @@ hash_x64_128(const void *key, const int len, const uint32_t seed, case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH - case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; - k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; h2 ^= len; - - h1 += h2; - h2 += h1; - - h1 = hash_fmix_64(h1); - h2 = hash_fmix_64(h2); - - h1 += h2; - h2 += h1; - - r_out[0] = h1; - r_out[1] = h2; -} - -/******************************************************************************/ -/* API. */ + case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; + k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; + } + } + + /* finalization */ + h1 ^= len; h2 ^= len; + + h1 += h2; + h2 += h1; + + h1 = hash_fmix_64(h1); + h2 = hash_fmix_64(h2); + + h1 += h2; + h2 += h1; + + r_out[0] = h1; + r_out[1] = h2; +} + +/******************************************************************************/ +/* API. */ static inline void hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) { assert(len <= INT_MAX); /* Unfortunate implementation limitation. */ -#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) +#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); -#else +#else { uint64_t hashes[2]; hash_x86_128(key, (int)len, seed, hashes); r_hash[0] = (size_t)hashes[0]; r_hash[1] = (size_t)hashes[1]; } -#endif -} - +#endif +} + #endif /* JEMALLOC_INTERNAL_HASH_H */ diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-linux.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-linux.h index 28cc151f07..64cbb5f19f 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-linux.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-linux.h @@ -1,15 +1,15 @@ -/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */ -#ifndef JEMALLOC_INTERNAL_DEFS_H_ +/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */ +#ifndef JEMALLOC_INTERNAL_DEFS_H_ #define JEMALLOC_INTERNAL_DEFS_H_ -/* - * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all - * public APIs to be prefixed. This makes it possible, with some care, to use - * multiple allocators simultaneously. - */ -/* #undef JEMALLOC_PREFIX */ -/* #undef JEMALLOC_CPREFIX */ - -/* +/* + * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all + * public APIs to be prefixed. This makes it possible, with some care, to use + * multiple allocators simultaneously. + */ +/* #undef JEMALLOC_PREFIX */ +/* #undef JEMALLOC_CPREFIX */ + +/* * Define overrides for non-standard allocator-related functions if they are * present on the system. 
*/ @@ -22,17 +22,17 @@ /* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */ /* - * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. - * For shared libraries, symbol visibility mechanisms prevent these symbols - * from being exported, but for static libraries, naming collisions are a real - * possibility. - */ -#define JEMALLOC_PRIVATE_NAMESPACE je_ - -/* - * Hyper-threaded CPUs may need a special instruction inside spin loops in - * order to yield to another virtual CPU. - */ + * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. + * For shared libraries, symbol visibility mechanisms prevent these symbols + * from being exported, but for static libraries, naming collisions are a real + * possibility. + */ +#define JEMALLOC_PRIVATE_NAMESPACE je_ + +/* + * Hyper-threaded CPUs may need a special instruction inside spin loops in + * order to yield to another virtual CPU. + */ #if defined(__i386__) || defined(__amd64__) #define CPU_SPINWAIT __asm__ volatile("pause") /* 1 if CPU_SPINWAIT is defined, 0 otherwise. */ @@ -41,14 +41,14 @@ #define CPU_SPINWAIT #define HAVE_CPU_SPINWAIT 0 #endif - + /* * Number of significant bits in virtual addresses. This may be less than the * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 * bits are the same as bit 47. */ #define LG_VADDR 48 - + /* Defined if C11 atomics are available. */ #define JEMALLOC_C11_ATOMICS 1 @@ -62,36 +62,36 @@ /* and the 8-bit variant support. */ #define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 -/* +/* * Defined if __builtin_clz() and __builtin_clzl() are available. - */ + */ #define JEMALLOC_HAVE_BUILTIN_CLZ - -/* + +/* * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. - */ + */ /* #undef JEMALLOC_OS_UNFAIR_LOCK */ - + /* Defined if syscall(2) is usable. */ #define JEMALLOC_USE_SYSCALL -/* +/* * Defined if secure_getenv(3) is available. - */ + */ #define JEMALLOC_HAVE_SECURE_GETENV - -/* + +/* * Defined if issetugid(2) is available. - */ + */ /* #undef JEMALLOC_HAVE_ISSETUGID */ - + /* Defined if pthread_atfork(3) is available. */ #define JEMALLOC_HAVE_PTHREAD_ATFORK /* Defined if pthread_setname_np(3) is available. */ /* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */ -/* +/* * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. */ #define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1 @@ -107,98 +107,98 @@ /* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */ /* - * Defined if _malloc_thread_cleanup() exists. At least in the case of - * FreeBSD, pthread_key_create() allocates, which if used during malloc - * bootstrapping will cause recursion into the pthreads library. Therefore, if - * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in - * malloc_tsd. - */ -/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ - -/* - * Defined if threaded initialization is known to be safe on this platform. - * Among other things, it must be possible to initialize a mutex without - * triggering allocation in order for threaded allocation to be safe. - */ + * Defined if _malloc_thread_cleanup() exists. At least in the case of + * FreeBSD, pthread_key_create() allocates, which if used during malloc + * bootstrapping will cause recursion into the pthreads library. Therefore, if + * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in + * malloc_tsd. + */ +/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ + +/* + * Defined if threaded initialization is known to be safe on this platform. 
+ * Among other things, it must be possible to initialize a mutex without + * triggering allocation in order for threaded allocation to be safe. + */ #define JEMALLOC_THREADED_INIT - -/* - * Defined if the pthreads implementation defines - * _pthread_mutex_init_calloc_cb(), in which case the function is used in order - * to avoid recursive allocation during mutex initialization. - */ -/* #undef JEMALLOC_MUTEX_INIT_CB */ - -/* Non-empty if the tls_model attribute is supported. */ + +/* + * Defined if the pthreads implementation defines + * _pthread_mutex_init_calloc_cb(), in which case the function is used in order + * to avoid recursive allocation during mutex initialization. + */ +/* #undef JEMALLOC_MUTEX_INIT_CB */ + +/* Non-empty if the tls_model attribute is supported. */ #define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) - -/* - * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables - * inline functions. - */ -/* #undef JEMALLOC_DEBUG */ - -/* JEMALLOC_STATS enables statistics calculation. */ + +/* + * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables + * inline functions. + */ +/* #undef JEMALLOC_DEBUG */ + +/* JEMALLOC_STATS enables statistics calculation. */ #define JEMALLOC_STATS - + /* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */ /* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */ -/* JEMALLOC_PROF enables allocation profiling. */ +/* JEMALLOC_PROF enables allocation profiling. */ #define JEMALLOC_PROF - -/* Use libunwind for profile backtracing if defined. */ + +/* Use libunwind for profile backtracing if defined. */ #define JEMALLOC_PROF_LIBUNWIND - -/* Use libgcc for profile backtracing if defined. */ + +/* Use libgcc for profile backtracing if defined. */ /* #undef JEMALLOC_PROF_LIBGCC */ - -/* Use gcc intrinsics for profile backtracing if defined. */ -/* #undef JEMALLOC_PROF_GCC */ - -/* + +/* Use gcc intrinsics for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_GCC */ + +/* * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage - * segment (DSS). - */ + * segment (DSS). + */ #define JEMALLOC_DSS - + /* Support memory filling (junk/zero). */ #define JEMALLOC_FILL - -/* Support utrace(2)-based tracing. */ -/* #undef JEMALLOC_UTRACE */ - -/* Support optional abort() on OOM. */ -/* #undef JEMALLOC_XMALLOC */ - -/* Support lazy locking (avoid locking unless a second thread is launched). */ -/* #undef JEMALLOC_LAZY_LOCK */ - + +/* Support utrace(2)-based tracing. */ +/* #undef JEMALLOC_UTRACE */ + +/* Support optional abort() on OOM. */ +/* #undef JEMALLOC_XMALLOC */ + +/* Support lazy locking (avoid locking unless a second thread is launched). */ +/* #undef JEMALLOC_LAZY_LOCK */ + /* * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size * classes). */ /* #undef LG_QUANTUM */ - + /* One page is 2^LG_PAGE bytes. */ #define LG_PAGE 12 -/* +/* * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the * system does not explicitly support huge pages; system calls that require * explicit huge page support are separately configured. - */ + */ #define LG_HUGEPAGE 21 - -/* + +/* * If defined, adjacent virtual memory mappings with identical attributes * automatically coalesce, and they fragment when changes are made to subranges. * This is the normal order of things for mmap()/munmap(), but on Windows * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. * mappings do *not* coalesce/fragment. 
- */ + */ #define JEMALLOC_MAPS_COALESCE - + /* * If defined, retain memory for later reuse by default rather than using e.g. * munmap() to unmap freed extents. This is enabled on 64-bit Linux because @@ -207,16 +207,16 @@ */ #define JEMALLOC_RETAIN -/* TLS is used to map arenas and magazine caches to threads. */ +/* TLS is used to map arenas and magazine caches to threads. */ #define JEMALLOC_TLS - -/* + +/* * Used to mark unreachable code to quiet "end of non-void" compiler warnings. * Don't use this directly; instead use unreachable() from util.h - */ + */ #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable - -/* + +/* * ffs*() functions to use for bitmapping. Don't use these directly; instead, * use ffs_*() from util.h. */ @@ -249,11 +249,11 @@ /* #undef JEMALLOC_READLINKAT */ /* - * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. - */ -/* #undef JEMALLOC_ZONE */ - -/* + * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. + */ +/* #undef JEMALLOC_ZONE */ + +/* * Methods for determining whether the OS overcommits. * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's * /proc/sys/vm.overcommit_memory file. @@ -272,28 +272,28 @@ #define JEMALLOC_HAVE_MADVISE_HUGE /* - * Methods for purging unused pages differ between operating systems. - * + * Methods for purging unused pages differ between operating systems. + * * madvise(..., MADV_FREE) : This marks pages as being unused, such that they * will be discarded rather than swapped out. * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is * defined, this immediately discards pages, - * such that new pages will be demand-zeroed if + * such that new pages will be demand-zeroed if * the address region is later touched; * otherwise this behaves similarly to * MADV_FREE, though typically with higher * system overhead. - */ + */ #define JEMALLOC_PURGE_MADVISE_FREE #define JEMALLOC_PURGE_MADVISE_DONTNEED #define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS - + /* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */ #define JEMALLOC_DEFINE_MADVISE_FREE -/* +/* * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise. - */ + */ #define JEMALLOC_MADVISE_DONTDUMP /* @@ -303,26 +303,26 @@ /* #undef JEMALLOC_THP */ /* Define if operating system has alloca.h header. */ -#define JEMALLOC_HAS_ALLOCA_H 1 - -/* C99 restrict keyword supported. */ -#define JEMALLOC_HAS_RESTRICT 1 - -/* For use by hash code. */ -/* #undef JEMALLOC_BIG_ENDIAN */ - -/* sizeof(int) == 2^LG_SIZEOF_INT. */ -#define LG_SIZEOF_INT 2 - -/* sizeof(long) == 2^LG_SIZEOF_LONG. */ -#define LG_SIZEOF_LONG 3 - +#define JEMALLOC_HAS_ALLOCA_H 1 + +/* C99 restrict keyword supported. */ +#define JEMALLOC_HAS_RESTRICT 1 + +/* For use by hash code. */ +/* #undef JEMALLOC_BIG_ENDIAN */ + +/* sizeof(int) == 2^LG_SIZEOF_INT. */ +#define LG_SIZEOF_INT 2 + +/* sizeof(long) == 2^LG_SIZEOF_LONG. */ +#define LG_SIZEOF_LONG 3 + /* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ #define LG_SIZEOF_LONG_LONG 3 -/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ -#define LG_SIZEOF_INTMAX_T 3 - +/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ +#define LG_SIZEOF_INTMAX_T 3 + /* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ #define JEMALLOC_GLIBC_MALLOC_HOOK @@ -369,4 +369,4 @@ /* Performs additional safety checks when defined. 
*/ /* #undef JEMALLOC_OPT_SAFETY_CHECKS */ -#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ +#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h index fda39748b8..b1f9c8ef23 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h @@ -1,5 +1,5 @@ -#pragma once - +#pragma once + #if defined(__APPLE__) && defined(__arm64__) # include "jemalloc_internal_defs-osx-arm64.h" #elif defined(__APPLE__) @@ -7,5 +7,5 @@ #elif defined(_MSC_VER) # include "jemalloc_internal_defs-win.h" #else -# include "jemalloc_internal_defs-linux.h" -#endif +# include "jemalloc_internal_defs-linux.h" +#endif diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h index d8ea06f6d0..316d0bd07a 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h @@ -1,17 +1,17 @@ #ifndef JEMALLOC_INTERNAL_MACROS_H #define JEMALLOC_INTERNAL_MACROS_H - + #ifdef JEMALLOC_DEBUG # define JEMALLOC_ALWAYS_INLINE static inline -#else +#else # define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline -#endif +#endif #ifdef _MSC_VER # define inline _inline #endif - + #define UNUSED JEMALLOC_ATTR(unused) - + #define ZU(z) ((size_t)z) #define ZD(z) ((ssize_t)z) #define QU(q) ((uint64_t)q) @@ -22,13 +22,13 @@ #define KQU(q) QU(q##ULL) #define KQD(q) QI(q##LL) -#ifndef __DECONST -# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) -#endif - +#ifndef __DECONST +# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) +#endif + #if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus) -# define restrict -#endif +# define restrict +#endif /* Various function pointers are static and immutable except during testing. */ #ifdef JEMALLOC_JET diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/mutex.h b/contrib/libs/jemalloc/include/jemalloc/internal/mutex.h index 7c24f0725e..42e10d39cf 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/mutex.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/mutex.h @@ -1,6 +1,6 @@ #ifndef JEMALLOC_INTERNAL_MUTEX_H #define JEMALLOC_INTERNAL_MUTEX_H - + #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/mutex_prof.h" #include "jemalloc/internal/tsd.h" @@ -16,7 +16,7 @@ typedef enum { malloc_mutex_address_ordered } malloc_mutex_lock_order_t; -typedef struct malloc_mutex_s malloc_mutex_t; +typedef struct malloc_mutex_s malloc_mutex_t; struct malloc_mutex_s { union { struct { @@ -29,7 +29,7 @@ struct malloc_mutex_s { * unlocking thread). 
*/ mutex_prof_data_t prof_data; -#ifdef _WIN32 +#ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 SRWLOCK lock; # else @@ -37,10 +37,10 @@ struct malloc_mutex_s { # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) os_unfair_lock lock; -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) pthread_mutex_t lock; malloc_mutex_t *postponed_next; -#else +#else pthread_mutex_t lock; #endif /* @@ -78,11 +78,11 @@ struct malloc_mutex_s { # define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock) # define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock) # define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock)) -# else +# else # define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock) # define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock) # define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock)) -# endif +# endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) # define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock) # define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock) @@ -91,13 +91,13 @@ struct malloc_mutex_s { # define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock) # define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock) # define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0) -#endif - +#endif + #define LOCK_PROF_DATA_INITIALIZER \ {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \ ATOMIC_INIT(0), 0, NULL, 0} - -#ifdef _WIN32 + +#ifdef _WIN32 # define MALLOC_MUTEX_INITIALIZER #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) # if defined(JEMALLOC_DEBUG) @@ -109,7 +109,7 @@ struct malloc_mutex_s { {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} # endif -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) # if (defined(JEMALLOC_DEBUG)) # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \ @@ -120,7 +120,7 @@ struct malloc_mutex_s { WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} # endif -#else +#else # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT # if defined(JEMALLOC_DEBUG) # define MALLOC_MUTEX_INITIALIZER \ @@ -131,15 +131,15 @@ struct malloc_mutex_s { {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} # endif -#endif - -#ifdef JEMALLOC_LAZY_LOCK -extern bool isthreaded; -#else -# undef isthreaded /* Undo private_namespace.h definition. */ -# define isthreaded true -#endif - +#endif + +#ifdef JEMALLOC_LAZY_LOCK +extern bool isthreaded; +#else +# undef isthreaded /* Undo private_namespace.h definition. 
*/ +# define isthreaded true +#endif + bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank, malloc_mutex_lock_order_t lock_order); void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); @@ -147,20 +147,20 @@ void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); bool malloc_mutex_boot(void); void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex); - + void malloc_mutex_lock_slow(malloc_mutex_t *mutex); - + static inline void malloc_mutex_lock_final(malloc_mutex_t *mutex) { MALLOC_MUTEX_LOCK(mutex); atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED); } - + static inline bool malloc_mutex_trylock_final(malloc_mutex_t *mutex) { return MALLOC_MUTEX_TRYLOCK(mutex); } - + static inline void mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) { if (config_stats) { @@ -177,18 +177,18 @@ mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) { static inline bool malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) { witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); - if (isthreaded) { + if (isthreaded) { if (malloc_mutex_trylock_final(mutex)) { atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED); return true; } mutex_owner_stats_update(tsdn, mutex); - } + } witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); return false; -} - +} + /* Aggregate lock prof data. */ static inline void malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) { @@ -196,7 +196,7 @@ malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) { if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) { nstime_copy(&sum->max_wait_time, &data->max_wait_time); } - + sum->n_wait_times += data->n_wait_times; sum->n_spin_acquired += data->n_spin_acquired; @@ -216,16 +216,16 @@ malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) { static inline void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) { witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); - if (isthreaded) { + if (isthreaded) { if (malloc_mutex_trylock_final(mutex)) { malloc_mutex_lock_slow(mutex); atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED); } mutex_owner_stats_update(tsdn, mutex); - } + } witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); -} - +} + static inline void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) { atomic_store_b(&mutex->locked, false, ATOMIC_RELAXED); diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/prng.h b/contrib/libs/jemalloc/include/jemalloc/internal/prng.h index 15cc2d18fa..3720c7cdd9 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/prng.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/prng.h @@ -1,34 +1,34 @@ #ifndef JEMALLOC_INTERNAL_PRNG_H #define JEMALLOC_INTERNAL_PRNG_H - + #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/bit_util.h" -/* - * Simple linear congruential pseudo-random number generator: - * - * prng(y) = (a*x + c) % m - * - * where the following constants ensure maximal period: - * - * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. - * c == Odd number (relatively prime to 2^n). - * m == 2^32 - * - * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. 
- * - * This choice of m has the disadvantage that the quality of the bits is +/* + * Simple linear congruential pseudo-random number generator: + * + * prng(y) = (a*x + c) % m + * + * where the following constants ensure maximal period: + * + * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. + * c == Odd number (relatively prime to 2^n). + * m == 2^32 + * + * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. + * + * This choice of m has the disadvantage that the quality of the bits is * proportional to bit position. For example, the lowest bit has a cycle of 2, - * the next has a cycle of 4, etc. For this reason, we prefer to use the upper - * bits. - */ - -/******************************************************************************/ + * the next has a cycle of 4, etc. For this reason, we prefer to use the upper + * bits. + */ + +/******************************************************************************/ /* INTERNAL DEFINITIONS -- IGNORE */ -/******************************************************************************/ +/******************************************************************************/ #define PRNG_A_32 UINT32_C(1103515241) #define PRNG_C_32 UINT32_C(12347) - + #define PRNG_A_64 UINT64_C(6364136223846793005) #define PRNG_C_64 UINT64_C(1442695040888963407) @@ -53,10 +53,10 @@ prng_state_next_zu(size_t state) { #endif } -/******************************************************************************/ +/******************************************************************************/ /* BEGIN PUBLIC API */ /******************************************************************************/ - + /* * The prng_lg_range functions give a uniform int in the half-open range [0, * 2**lg_range). If atomic is true, they do so safely from multiple threads. diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/ql.h b/contrib/libs/jemalloc/include/jemalloc/internal/ql.h index 8029040771..ff35ab028d 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/ql.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/ql.h @@ -5,84 +5,84 @@ /* List definitions. */ #define ql_head(a_type) \ -struct { \ - a_type *qlh_first; \ -} - +struct { \ + a_type *qlh_first; \ +} + #define ql_head_initializer(a_head) {NULL} - + #define ql_elm(a_type) qr(a_type) - -/* List functions. */ + +/* List functions. */ #define ql_new(a_head) do { \ - (a_head)->qlh_first = NULL; \ -} while (0) - + (a_head)->qlh_first = NULL; \ +} while (0) + #define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) - + #define ql_first(a_head) ((a_head)->qlh_first) - + #define ql_last(a_head, a_field) \ - ((ql_first(a_head) != NULL) \ - ? qr_prev(ql_first(a_head), a_field) : NULL) - + ((ql_first(a_head) != NULL) \ + ? qr_prev(ql_first(a_head), a_field) : NULL) + #define ql_next(a_head, a_elm, a_field) \ - ((ql_last(a_head, a_field) != (a_elm)) \ - ? qr_next((a_elm), a_field) : NULL) - + ((ql_last(a_head, a_field) != (a_elm)) \ + ? qr_next((a_elm), a_field) : NULL) + #define ql_prev(a_head, a_elm, a_field) \ - ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ - : NULL) - + ((ql_first(a_head) != (a_elm)) ? 
qr_prev((a_elm), a_field) \ + : NULL) + #define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ - qr_before_insert((a_qlelm), (a_elm), a_field); \ - if (ql_first(a_head) == (a_qlelm)) { \ - ql_first(a_head) = (a_elm); \ - } \ -} while (0) - + qr_before_insert((a_qlelm), (a_elm), a_field); \ + if (ql_first(a_head) == (a_qlelm)) { \ + ql_first(a_head) = (a_elm); \ + } \ +} while (0) + #define ql_after_insert(a_qlelm, a_elm, a_field) \ - qr_after_insert((a_qlelm), (a_elm), a_field) - + qr_after_insert((a_qlelm), (a_elm), a_field) + #define ql_head_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = (a_elm); \ -} while (0) - + if (ql_first(a_head) != NULL) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = (a_elm); \ +} while (0) + #define ql_tail_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = qr_next((a_elm), a_field); \ -} while (0) - + if (ql_first(a_head) != NULL) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = qr_next((a_elm), a_field); \ +} while (0) + #define ql_remove(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) == (a_elm)) { \ - ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ - } \ - if (ql_first(a_head) != (a_elm)) { \ - qr_remove((a_elm), a_field); \ - } else { \ - ql_first(a_head) = NULL; \ - } \ -} while (0) - + if (ql_first(a_head) == (a_elm)) { \ + ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ + } \ + if (ql_first(a_head) != (a_elm)) { \ + qr_remove((a_elm), a_field); \ + } else { \ + ql_first(a_head) = NULL; \ + } \ +} while (0) + #define ql_head_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_first(a_head); \ - ql_remove((a_head), t, a_field); \ -} while (0) - + a_type *t = ql_first(a_head); \ + ql_remove((a_head), t, a_field); \ +} while (0) + #define ql_tail_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_last(a_head, a_field); \ - ql_remove((a_head), t, a_field); \ -} while (0) - + a_type *t = ql_last(a_head, a_field); \ + ql_remove((a_head), t, a_field); \ +} while (0) + #define ql_foreach(a_var, a_head, a_field) \ - qr_foreach((a_var), ql_first(a_head), a_field) - + qr_foreach((a_var), ql_first(a_head), a_field) + #define ql_reverse_foreach(a_var, a_head, a_field) \ - qr_reverse_foreach((a_var), ql_first(a_head), a_field) + qr_reverse_foreach((a_var), ql_first(a_head), a_field) #endif /* JEMALLOC_INTERNAL_QL_H */ diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/qr.h b/contrib/libs/jemalloc/include/jemalloc/internal/qr.h index 1e1056b386..3fa5f6f010 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/qr.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/qr.h @@ -1,72 +1,72 @@ #ifndef JEMALLOC_INTERNAL_QR_H #define JEMALLOC_INTERNAL_QR_H -/* Ring definitions. */ +/* Ring definitions. */ #define qr(a_type) \ -struct { \ - a_type *qre_next; \ - a_type *qre_prev; \ -} - -/* Ring functions. */ +struct { \ + a_type *qre_next; \ + a_type *qre_prev; \ +} + +/* Ring functions. 
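 *
 * The ql_ list macros above layer a head pointer over this ring. As a
 * sketch of direct ring use, with a hypothetical element type (the names
 * item_t, key, and link are illustrative only):
 *
 *   typedef struct item_s item_t;
 *   struct item_s {
 *       int key;
 *       qr(item_t) link;
 *   };
 *
 *   item_t a, b, *p;
 *   qr_new(&a, link);               (a alone forms a one-element ring)
 *   qr_after_insert(&a, &b, link);  (ring is now a, b, and back to a)
 *   qr_foreach(p, &a, link) {       (visits a, then b)
 *       ...
 *   }
 *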
*/ #define qr_new(a_qr, a_field) do { \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) - + (a_qr)->a_field.qre_next = (a_qr); \ + (a_qr)->a_field.qre_prev = (a_qr); \ +} while (0) + #define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) - + #define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) - + #define qr_before_insert(a_qrelm, a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qrelm); \ - (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ - (a_qrelm)->a_field.qre_prev = (a_qr); \ -} while (0) - + (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ + (a_qr)->a_field.qre_next = (a_qrelm); \ + (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ + (a_qrelm)->a_field.qre_prev = (a_qr); \ +} while (0) + #define qr_after_insert(a_qrelm, a_qr, a_field) do { \ - (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ - (a_qr)->a_field.qre_prev = (a_qrelm); \ - (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ - (a_qrelm)->a_field.qre_next = (a_qr); \ + (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ + (a_qr)->a_field.qre_prev = (a_qrelm); \ + (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ + (a_qrelm)->a_field.qre_next = (a_qr); \ } while (0) - + #define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \ a_type *t; \ - (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ - (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ - t = (a_qr_a)->a_field.qre_prev; \ - (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ - (a_qr_b)->a_field.qre_prev = t; \ -} while (0) - + (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ + (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ + t = (a_qr_a)->a_field.qre_prev; \ + (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ + (a_qr_b)->a_field.qre_prev = t; \ +} while (0) + /* * qr_meld() and qr_split() are functionally equivalent, so there's no need to * have two copies of the code. */ #define qr_split(a_qr_a, a_qr_b, a_type, a_field) \ qr_meld((a_qr_a), (a_qr_b), a_type, a_field) - + #define qr_remove(a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev->a_field.qre_next \ - = (a_qr)->a_field.qre_next; \ - (a_qr)->a_field.qre_next->a_field.qre_prev \ - = (a_qr)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) - + (a_qr)->a_field.qre_prev->a_field.qre_next \ + = (a_qr)->a_field.qre_next; \ + (a_qr)->a_field.qre_next->a_field.qre_prev \ + = (a_qr)->a_field.qre_prev; \ + (a_qr)->a_field.qre_next = (a_qr); \ + (a_qr)->a_field.qre_prev = (a_qr); \ +} while (0) + #define qr_foreach(var, a_qr, a_field) \ - for ((var) = (a_qr); \ - (var) != NULL; \ - (var) = (((var)->a_field.qre_next != (a_qr)) \ - ? (var)->a_field.qre_next : NULL)) - + for ((var) = (a_qr); \ + (var) != NULL; \ + (var) = (((var)->a_field.qre_next != (a_qr)) \ + ? (var)->a_field.qre_next : NULL)) + #define qr_reverse_foreach(var, a_qr, a_field) \ - for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ - (var) != NULL; \ - (var) = (((var) != (a_qr)) \ - ? (var)->a_field.qre_prev : NULL)) + for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ + (var) != NULL; \ + (var) = (((var) != (a_qr)) \ + ? 
(var)->a_field.qre_prev : NULL)) #endif /* JEMALLOC_INTERNAL_QR_H */ diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/rb.h b/contrib/libs/jemalloc/include/jemalloc/internal/rb.h index 47fa5ca99b..9c1022683a 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/rb.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/rb.h @@ -1,87 +1,87 @@ -/*- - ******************************************************************************* - * - * cpp macro implementation of left-leaning 2-3 red-black trees. Parent - * pointers are not used, and color bits are stored in the least significant - * bit of right-child pointers (if RB_COMPACT is defined), thus making node - * linkage as compact as is possible for red-black trees. - * - * Usage: - * - * #include <stdint.h> - * #include <stdbool.h> - * #define NDEBUG // (Optional, see assert(3).) - * #include <assert.h> - * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) - * #include <rb.h> - * ... - * - ******************************************************************************* - */ - -#ifndef RB_H_ +/*- + ******************************************************************************* + * + * cpp macro implementation of left-leaning 2-3 red-black trees. Parent + * pointers are not used, and color bits are stored in the least significant + * bit of right-child pointers (if RB_COMPACT is defined), thus making node + * linkage as compact as is possible for red-black trees. + * + * Usage: + * + * #include <stdint.h> + * #include <stdbool.h> + * #define NDEBUG // (Optional, see assert(3).) + * #include <assert.h> + * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) + * #include <rb.h> + * ... + * + ******************************************************************************* + */ + +#ifndef RB_H_ #define RB_H_ - + #ifndef __PGI #define RB_COMPACT #endif -#ifdef RB_COMPACT -/* Node structure. */ +#ifdef RB_COMPACT +/* Node structure. */ #define rb_node(a_type) \ -struct { \ - a_type *rbn_left; \ - a_type *rbn_right_red; \ -} -#else +struct { \ + a_type *rbn_left; \ + a_type *rbn_right_red; \ +} +#else #define rb_node(a_type) \ -struct { \ - a_type *rbn_left; \ - a_type *rbn_right; \ - bool rbn_red; \ -} -#endif - -/* Root structure. */ +struct { \ + a_type *rbn_left; \ + a_type *rbn_right; \ + bool rbn_red; \ +} +#endif + +/* Root structure. */ #define rb_tree(a_type) \ -struct { \ - a_type *rbt_root; \ -} - -/* Left accessors. */ +struct { \ + a_type *rbt_root; \ +} + +/* Left accessors. */ #define rbtn_left_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_left) + ((a_node)->a_field.rbn_left) #define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ - (a_node)->a_field.rbn_left = a_left; \ -} while (0) - -#ifdef RB_COMPACT -/* Right accessors. */ + (a_node)->a_field.rbn_left = a_left; \ +} while (0) + +#ifdef RB_COMPACT +/* Right accessors. */ #define rbtn_right_get(a_type, a_field, a_node) \ - ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ - & ((ssize_t)-2))) + ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ + & ((ssize_t)-2))) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ - | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ -} while (0) - -/* Color accessors. */ + (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ + | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ +} while (0) + +/* Color accessors. 
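 *
 * Nodes are at least pointer-aligned, so the low bit of rbn_right_red is
 * free to carry the node's color. A sketch of the decoding performed by
 * rbtn_right_get() above and rbtn_red_get() below (n and link are
 * hypothetical names):
 *
 *   right = (node_t *)((uintptr_t)n->link.rbn_right_red & ~(uintptr_t)1);
 *   red   = (bool)((uintptr_t)n->link.rbn_right_red & (uintptr_t)1);
 *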
*/ #define rbtn_red_get(a_type, a_field, a_node) \ - ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ - & ((size_t)1))) + ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ + & ((size_t)1))) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ - (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ - | ((ssize_t)a_red)); \ -} while (0) + (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ + (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ + | ((ssize_t)a_red)); \ +} while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ - (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ -} while (0) + (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ + (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ +} while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ - (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ -} while (0) + (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ + (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ +} while (0) /* Node initializer. */ #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ @@ -91,153 +91,153 @@ struct { \ rbtn_right_set(a_type, a_field, (a_node), NULL); \ rbtn_red_set(a_type, a_field, (a_node)); \ } while (0) -#else -/* Right accessors. */ +#else +/* Right accessors. */ #define rbtn_right_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_right) + ((a_node)->a_field.rbn_right) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ - (a_node)->a_field.rbn_right = a_right; \ -} while (0) - -/* Color accessors. */ + (a_node)->a_field.rbn_right = a_right; \ +} while (0) + +/* Color accessors. */ #define rbtn_red_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_red) + ((a_node)->a_field.rbn_red) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ - (a_node)->a_field.rbn_red = (a_red); \ -} while (0) + (a_node)->a_field.rbn_red = (a_red); \ +} while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_red = true; \ -} while (0) + (a_node)->a_field.rbn_red = true; \ +} while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_red = false; \ -} while (0) - -/* Node initializer. */ + (a_node)->a_field.rbn_red = false; \ +} while (0) + +/* Node initializer. */ #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ rbtn_left_set(a_type, a_field, (a_node), NULL); \ rbtn_right_set(a_type, a_field, (a_node), NULL); \ - rbtn_red_set(a_type, a_field, (a_node)); \ -} while (0) + rbtn_red_set(a_type, a_field, (a_node)); \ +} while (0) #endif - -/* Tree initializer. */ + +/* Tree initializer. */ #define rb_new(a_type, a_field, a_rbt) do { \ (a_rbt)->rbt_root = NULL; \ -} while (0) - -/* Internal utility macros. */ +} while (0) + +/* Internal utility macros. 
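 *
 * rbtn_first/rbtn_last below walk to the extreme leaves, and the rotate
 * macros return the new subtree root through r_node; e.g. rotating left
 * about n:
 *
 *       n                r
 *      / \              / \
 *     A   r     =>     n   C
 *        / \          / \
 *       B   C        A   B
 *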
*/ #define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ - (r_node) = (a_root); \ + (r_node) = (a_root); \ if ((r_node) != NULL) { \ - for (; \ + for (; \ rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ - (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ - } \ - } \ -} while (0) - + (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ + } \ + } \ +} while (0) + #define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ - (r_node) = (a_root); \ + (r_node) = (a_root); \ if ((r_node) != NULL) { \ for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ - } \ - } \ -} while (0) - + } \ + } \ +} while (0) + #define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ - (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ - rbtn_right_set(a_type, a_field, (a_node), \ - rbtn_left_get(a_type, a_field, (r_node))); \ - rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ -} while (0) - + (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ + rbtn_right_set(a_type, a_field, (a_node), \ + rbtn_left_get(a_type, a_field, (r_node))); \ + rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ +} while (0) + #define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ - (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ - rbtn_left_set(a_type, a_field, (a_node), \ - rbtn_right_get(a_type, a_field, (r_node))); \ - rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ -} while (0) - -/* - * The rb_proto() macro generates function prototypes that correspond to the - * functions generated by an equivalently parameterized call to rb_gen(). - */ - + (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ + rbtn_left_set(a_type, a_field, (a_node), \ + rbtn_right_get(a_type, a_field, (r_node))); \ + rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ +} while (0) + +/* + * The rb_proto() macro generates function prototypes that correspond to the + * functions generated by an equivalently parameterized call to rb_gen(). 
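 *
 * For example, prototypes for the ex_ tree used in the rb_gen()
 * documentation below could be emitted with:
 *
 *   rb_proto(static, ex_, ex_t, ex_node_t)
 *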
+ */ + #define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ -a_attr void \ -a_prefix##new(a_rbt_type *rbtree); \ +a_attr void \ +a_prefix##new(a_rbt_type *rbtree); \ a_attr bool \ a_prefix##empty(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##first(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##last(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##next(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ +a_attr a_type * \ +a_prefix##first(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##last(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##next(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, const a_type *key); \ -a_attr a_type * \ +a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \ -a_attr a_type * \ +a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \ -a_attr void \ -a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ -a_attr void \ -a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ - a_rbt_type *, a_type *, void *), void *arg); \ -a_attr a_type * \ -a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ +a_attr void \ +a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ +a_attr void \ +a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ + a_rbt_type *, a_type *, void *), void *arg); \ +a_attr a_type * \ +a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ a_attr void \ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ void *arg); - -/* - * The rb_gen() macro generates a type-specific red-black tree implementation, - * based on the above cpp macros. - * - * Arguments: - * - * a_attr : Function attribute for generated functions (ex: static). - * a_prefix : Prefix for generated functions (ex: ex_). - * a_rb_type : Type for red-black tree data structure (ex: ex_t). - * a_type : Type for red-black tree node data structure (ex: ex_node_t). - * a_field : Name of red-black tree node linkage (ex: ex_link). - * a_cmp : Node comparison function name, with the following prototype: - * int (a_cmp *)(a_type *a_node, a_type *a_other); - * ^^^^^^ - * or a_key + +/* + * The rb_gen() macro generates a type-specific red-black tree implementation, + * based on the above cpp macros. + * + * Arguments: + * + * a_attr : Function attribute for generated functions (ex: static). + * a_prefix : Prefix for generated functions (ex: ex_). + * a_rb_type : Type for red-black tree data structure (ex: ex_t). + * a_type : Type for red-black tree node data structure (ex: ex_node_t). + * a_field : Name of red-black tree node linkage (ex: ex_link). + * a_cmp : Node comparison function name, with the following prototype: + * int (a_cmp *)(a_type *a_node, a_type *a_other); + * ^^^^^^ + * or a_key * Interpretation of comparison function return values: - * -1 : a_node < a_other - * 0 : a_node == a_other - * 1 : a_node > a_other - * In all cases, the a_node or a_key macro argument is the first - * argument to the comparison function, which makes it possible - * to write comparison functions that treat the first argument - * specially. 
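 *
 * For instance, a conforming comparison function over a hypothetical
 * integer key field could be written as:
 *
 *   static int
 *   ex_cmp(ex_node_t *a, ex_node_t *b) {
 *       return (a->key > b->key) - (a->key < b->key);
 *   }
 *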
- * - * Assuming the following setup: - * - * typedef struct ex_node_s ex_node_t; - * struct ex_node_s { - * rb_node(ex_node_t) ex_link; - * }; - * typedef rb_tree(ex_node_t) ex_t; - * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) - * - * The following API is generated: - * - * static void - * ex_new(ex_t *tree); - * Description: Initialize a red-black tree structure. - * Args: - * tree: Pointer to an uninitialized red-black tree object. - * + * -1 : a_node < a_other + * 0 : a_node == a_other + * 1 : a_node > a_other + * In all cases, the a_node or a_key macro argument is the first + * argument to the comparison function, which makes it possible + * to write comparison functions that treat the first argument + * specially. + * + * Assuming the following setup: + * + * typedef struct ex_node_s ex_node_t; + * struct ex_node_s { + * rb_node(ex_node_t) ex_link; + * }; + * typedef rb_tree(ex_node_t) ex_t; + * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) + * + * The following API is generated: + * + * static void + * ex_new(ex_t *tree); + * Description: Initialize a red-black tree structure. + * Args: + * tree: Pointer to an uninitialized red-black tree object. + * * static bool * ex_empty(ex_t *tree); * Description: Determine whether tree is empty. @@ -245,85 +245,85 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ * tree: Pointer to an initialized red-black tree object. * Ret: True if tree is empty, false otherwise. * - * static ex_node_t * - * ex_first(ex_t *tree); - * static ex_node_t * - * ex_last(ex_t *tree); - * Description: Get the first/last node in tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * Ret: First/last node in tree, or NULL if tree is empty. - * - * static ex_node_t * - * ex_next(ex_t *tree, ex_node_t *node); - * static ex_node_t * - * ex_prev(ex_t *tree, ex_node_t *node); - * Description: Get node's successor/predecessor. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: A node in tree. - * Ret: node's successor/predecessor in tree, or NULL if node is - * last/first. - * - * static ex_node_t * + * static ex_node_t * + * ex_first(ex_t *tree); + * static ex_node_t * + * ex_last(ex_t *tree); + * Description: Get the first/last node in tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * Ret: First/last node in tree, or NULL if tree is empty. + * + * static ex_node_t * + * ex_next(ex_t *tree, ex_node_t *node); + * static ex_node_t * + * ex_prev(ex_t *tree, ex_node_t *node); + * Description: Get node's successor/predecessor. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: A node in tree. + * Ret: node's successor/predecessor in tree, or NULL if node is + * last/first. + * + * static ex_node_t * * ex_search(ex_t *tree, const ex_node_t *key); - * Description: Search for node that matches key. - * Args: - * tree: Pointer to an initialized red-black tree object. - * key : Search key. - * Ret: Node in tree that matches key, or NULL if no match. - * - * static ex_node_t * + * Description: Search for node that matches key. + * Args: + * tree: Pointer to an initialized red-black tree object. + * key : Search key. + * Ret: Node in tree that matches key, or NULL if no match. + * + * static ex_node_t * * ex_nsearch(ex_t *tree, const ex_node_t *key); - * static ex_node_t * + * static ex_node_t * * ex_psearch(ex_t *tree, const ex_node_t *key); - * Description: Search for node that matches key. 
If no match is found, - * return what would be key's successor/predecessor, were - * key in tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * key : Search key. - * Ret: Node in tree that matches key, or if no match, hypothetical node's - * successor/predecessor (NULL if no successor/predecessor). - * - * static void - * ex_insert(ex_t *tree, ex_node_t *node); - * Description: Insert node into tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: Node to be inserted into tree. - * - * static void - * ex_remove(ex_t *tree, ex_node_t *node); - * Description: Remove node from tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: Node in tree to be removed. - * - * static ex_node_t * - * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, - * ex_node_t *, void *), void *arg); - * static ex_node_t * - * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, - * ex_node_t *, void *), void *arg); - * Description: Iterate forward/backward over tree, starting at node. If - * tree is modified, iteration must be immediately - * terminated by the callback function that causes the - * modification. - * Args: - * tree : Pointer to an initialized red-black tree object. - * start: Node at which to start iteration, or NULL to start at - * first/last node. - * cb : Callback function, which is called for each node during - * iteration. Under normal circumstances the callback function - * should return NULL, which causes iteration to continue. If a - * callback function returns non-NULL, iteration is immediately - * terminated and the non-NULL return value is returned by the - * iterator. This is useful for re-starting iteration after - * modifying tree. - * arg : Opaque pointer passed to cb(). - * Ret: NULL if iteration completed, or the non-NULL callback return value - * that caused termination of the iteration. + * Description: Search for node that matches key. If no match is found, + * return what would be key's successor/predecessor, were + * key in tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * key : Search key. + * Ret: Node in tree that matches key, or if no match, hypothetical node's + * successor/predecessor (NULL if no successor/predecessor). + * + * static void + * ex_insert(ex_t *tree, ex_node_t *node); + * Description: Insert node into tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: Node to be inserted into tree. + * + * static void + * ex_remove(ex_t *tree, ex_node_t *node); + * Description: Remove node from tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: Node in tree to be removed. + * + * static ex_node_t * + * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, + * ex_node_t *, void *), void *arg); + * static ex_node_t * + * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, + * ex_node_t *, void *), void *arg); + * Description: Iterate forward/backward over tree, starting at node. If + * tree is modified, iteration must be immediately + * terminated by the callback function that causes the + * modification. + * Args: + * tree : Pointer to an initialized red-black tree object. + * start: Node at which to start iteration, or NULL to start at + * first/last node. + * cb : Callback function, which is called for each node during + * iteration. Under normal circumstances the callback function + * should return NULL, which causes iteration to continue. 
If a + * callback function returns non-NULL, iteration is immediately + * terminated and the non-NULL return value is returned by the + * iterator. This is useful for re-starting iteration after + * modifying tree. + * arg : Opaque pointer passed to cb(). + * Ret: NULL if iteration completed, or the non-NULL callback return value + * that caused termination of the iteration. * * static void * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg); @@ -338,646 +338,646 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ * during iteration. There is no way to stop iteration once it * has begun. * arg : Opaque pointer passed to cb(). - */ + */ #define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ -a_attr void \ -a_prefix##new(a_rbt_type *rbtree) { \ - rb_new(a_type, a_field, rbtree); \ -} \ +a_attr void \ +a_prefix##new(a_rbt_type *rbtree) { \ + rb_new(a_type, a_field, rbtree); \ +} \ a_attr bool \ a_prefix##empty(a_rbt_type *rbtree) { \ return (rbtree->rbt_root == NULL); \ } \ -a_attr a_type * \ -a_prefix##first(a_rbt_type *rbtree) { \ - a_type *ret; \ - rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ +a_attr a_type * \ +a_prefix##first(a_rbt_type *rbtree) { \ + a_type *ret; \ + rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ return ret; \ -} \ -a_attr a_type * \ -a_prefix##last(a_rbt_type *rbtree) { \ - a_type *ret; \ - rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ +} \ +a_attr a_type * \ +a_prefix##last(a_rbt_type *rbtree) { \ + a_type *ret; \ + rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ return ret; \ -} \ -a_attr a_type * \ -a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ - a_type *ret; \ +} \ +a_attr a_type * \ +a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ + a_type *ret; \ if (rbtn_right_get(a_type, a_field, node) != NULL) { \ - rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ - a_field, node), ret); \ - } else { \ - a_type *tnode = rbtree->rbt_root; \ + rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ + a_field, node), ret); \ + } else { \ + a_type *tnode = rbtree->rbt_root; \ assert(tnode != NULL); \ ret = NULL; \ - while (true) { \ - int cmp = (a_cmp)(node, tnode); \ - if (cmp < 0) { \ - ret = tnode; \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - break; \ - } \ + while (true) { \ + int cmp = (a_cmp)(node, tnode); \ + if (cmp < 0) { \ + ret = tnode; \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + break; \ + } \ assert(tnode != NULL); \ - } \ - } \ + } \ + } \ return ret; \ -} \ -a_attr a_type * \ -a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ - a_type *ret; \ +} \ +a_attr a_type * \ +a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ + a_type *ret; \ if (rbtn_left_get(a_type, a_field, node) != NULL) { \ - rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ - a_field, node), ret); \ - } else { \ - a_type *tnode = rbtree->rbt_root; \ + rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ + a_field, node), ret); \ + } else { \ + a_type *tnode = rbtree->rbt_root; \ assert(tnode != NULL); \ ret = NULL; \ - while (true) { \ - int cmp = (a_cmp)(node, tnode); \ - if (cmp < 0) { \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - ret = tnode; \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { 
\ - break; \ - } \ + while (true) { \ + int cmp = (a_cmp)(node, tnode); \ + if (cmp < 0) { \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + ret = tnode; \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + break; \ + } \ assert(tnode != NULL); \ - } \ - } \ + } \ + } \ return ret; \ -} \ -a_attr a_type * \ +} \ +a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \ - a_type *ret; \ - int cmp; \ - ret = rbtree->rbt_root; \ + a_type *ret; \ + int cmp; \ + ret = rbtree->rbt_root; \ while (ret != NULL \ - && (cmp = (a_cmp)(key, ret)) != 0) { \ - if (cmp < 0) { \ - ret = rbtn_left_get(a_type, a_field, ret); \ - } else { \ - ret = rbtn_right_get(a_type, a_field, ret); \ - } \ - } \ + && (cmp = (a_cmp)(key, ret)) != 0) { \ + if (cmp < 0) { \ + ret = rbtn_left_get(a_type, a_field, ret); \ + } else { \ + ret = rbtn_right_get(a_type, a_field, ret); \ + } \ + } \ return ret; \ -} \ -a_attr a_type * \ +} \ +a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \ - a_type *ret; \ - a_type *tnode = rbtree->rbt_root; \ + a_type *ret; \ + a_type *tnode = rbtree->rbt_root; \ ret = NULL; \ while (tnode != NULL) { \ - int cmp = (a_cmp)(key, tnode); \ - if (cmp < 0) { \ - ret = tnode; \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - ret = tnode; \ - break; \ - } \ - } \ + int cmp = (a_cmp)(key, tnode); \ + if (cmp < 0) { \ + ret = tnode; \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + ret = tnode; \ + break; \ + } \ + } \ return ret; \ -} \ -a_attr a_type * \ +} \ +a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ - a_type *ret; \ - a_type *tnode = rbtree->rbt_root; \ + a_type *ret; \ + a_type *tnode = rbtree->rbt_root; \ ret = NULL; \ while (tnode != NULL) { \ - int cmp = (a_cmp)(key, tnode); \ - if (cmp < 0) { \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - ret = tnode; \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - ret = tnode; \ - break; \ - } \ - } \ + int cmp = (a_cmp)(key, tnode); \ + if (cmp < 0) { \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + ret = tnode; \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + ret = tnode; \ + break; \ + } \ + } \ return ret; \ -} \ -a_attr void \ -a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } path[sizeof(void *) << 4], *pathp; \ - rbt_node_new(a_type, a_field, rbtree, node); \ - /* Wind. */ \ - path->node = rbtree->rbt_root; \ +} \ +a_attr void \ +a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ + struct { \ + a_type *node; \ + int cmp; \ + } path[sizeof(void *) << 4], *pathp; \ + rbt_node_new(a_type, a_field, rbtree, node); \ + /* Wind. */ \ + path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != NULL; pathp++) { \ - int cmp = pathp->cmp = a_cmp(node, pathp->node); \ - assert(cmp != 0); \ - if (cmp < 0) { \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } else { \ - pathp[1].node = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - } \ - } \ - pathp->node = node; \ - /* Unwind. 
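       Note that insert() records its search path in path[] instead of
       relying on parent pointers; with 8-byte pointers the array holds
       sizeof(void *) << 4 == 128 entries, covering the worst-case height
       (roughly 2 lg n) of a red-black tree over any addressable number
       of nodes.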
*/ \ - for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ - a_type *cnode = pathp->node; \ - if (pathp->cmp < 0) { \ - a_type *left = pathp[1].node; \ - rbtn_left_set(a_type, a_field, cnode, left); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + int cmp = pathp->cmp = a_cmp(node, pathp->node); \ + assert(cmp != 0); \ + if (cmp < 0) { \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } else { \ + pathp[1].node = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + } \ + } \ + pathp->node = node; \ + /* Unwind. */ \ + for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ + a_type *cnode = pathp->node; \ + if (pathp->cmp < 0) { \ + a_type *left = pathp[1].node; \ + rbtn_left_set(a_type, a_field, cnode, left); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ - /* Fix up 4-node. */ \ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, cnode, tnode); \ - cnode = tnode; \ - } \ - } else { \ - return; \ - } \ - } else { \ - a_type *right = pathp[1].node; \ - rbtn_right_set(a_type, a_field, cnode, right); \ - if (rbtn_red_get(a_type, a_field, right)) { \ - a_type *left = rbtn_left_get(a_type, a_field, cnode); \ + /* Fix up 4-node. */ \ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, cnode, tnode); \ + cnode = tnode; \ + } \ + } else { \ + return; \ + } \ + } else { \ + a_type *right = pathp[1].node; \ + rbtn_right_set(a_type, a_field, cnode, right); \ + if (rbtn_red_get(a_type, a_field, right)) { \ + a_type *left = rbtn_left_get(a_type, a_field, cnode); \ if (left != NULL && rbtn_red_get(a_type, a_field, \ left)) { \ - /* Split 4-node. */ \ - rbtn_black_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, right); \ - rbtn_red_set(a_type, a_field, cnode); \ - } else { \ - /* Lean left. */ \ - a_type *tnode; \ - bool tred = rbtn_red_get(a_type, a_field, cnode); \ - rbtn_rotate_left(a_type, a_field, cnode, tnode); \ - rbtn_color_set(a_type, a_field, tnode, tred); \ - rbtn_red_set(a_type, a_field, cnode); \ - cnode = tnode; \ - } \ - } else { \ - return; \ - } \ - } \ - pathp->node = cnode; \ - } \ - /* Set root, and make it black. */ \ - rbtree->rbt_root = path->node; \ - rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ -} \ -a_attr void \ -a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } *pathp, *nodep, path[sizeof(void *) << 4]; \ - /* Wind. */ \ - nodep = NULL; /* Silence compiler warning. */ \ - path->node = rbtree->rbt_root; \ + /* Split 4-node. */ \ + rbtn_black_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, right); \ + rbtn_red_set(a_type, a_field, cnode); \ + } else { \ + /* Lean left. */ \ + a_type *tnode; \ + bool tred = rbtn_red_get(a_type, a_field, cnode); \ + rbtn_rotate_left(a_type, a_field, cnode, tnode); \ + rbtn_color_set(a_type, a_field, tnode, tred); \ + rbtn_red_set(a_type, a_field, cnode); \ + cnode = tnode; \ + } \ + } else { \ + return; \ + } \ + } \ + pathp->node = cnode; \ + } \ + /* Set root, and make it black. 
*/ \ + rbtree->rbt_root = path->node; \ + rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ +} \ +a_attr void \ +a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ + struct { \ + a_type *node; \ + int cmp; \ + } *pathp, *nodep, path[sizeof(void *) << 4]; \ + /* Wind. */ \ + nodep = NULL; /* Silence compiler warning. */ \ + path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != NULL; pathp++) { \ - int cmp = pathp->cmp = a_cmp(node, pathp->node); \ - if (cmp < 0) { \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } else { \ - pathp[1].node = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - if (cmp == 0) { \ - /* Find node's successor, in preparation for swap. */ \ - pathp->cmp = 1; \ - nodep = pathp; \ + int cmp = pathp->cmp = a_cmp(node, pathp->node); \ + if (cmp < 0) { \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } else { \ + pathp[1].node = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + if (cmp == 0) { \ + /* Find node's successor, in preparation for swap. */ \ + pathp->cmp = 1; \ + nodep = pathp; \ for (pathp++; pathp->node != NULL; pathp++) { \ - pathp->cmp = -1; \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } \ - break; \ - } \ - } \ - } \ - assert(nodep->node == node); \ - pathp--; \ - if (pathp->node != node) { \ - /* Swap node with its successor. */ \ - bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ - rbtn_color_set(a_type, a_field, pathp->node, \ - rbtn_red_get(a_type, a_field, node)); \ - rbtn_left_set(a_type, a_field, pathp->node, \ - rbtn_left_get(a_type, a_field, node)); \ - /* If node's successor is its right child, the following code */\ - /* will do the wrong thing for the right child pointer. */\ - /* However, it doesn't matter, because the pointer will be */\ - /* properly set when the successor is pruned. */\ - rbtn_right_set(a_type, a_field, pathp->node, \ - rbtn_right_get(a_type, a_field, node)); \ - rbtn_color_set(a_type, a_field, node, tred); \ - /* The pruned leaf node's child pointers are never accessed */\ - /* again, so don't bother setting them to nil. */\ - nodep->node = pathp->node; \ - pathp->node = node; \ - if (nodep == path) { \ - rbtree->rbt_root = nodep->node; \ - } else { \ - if (nodep[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, nodep[-1].node, \ - nodep->node); \ - } else { \ - rbtn_right_set(a_type, a_field, nodep[-1].node, \ - nodep->node); \ - } \ - } \ - } else { \ - a_type *left = rbtn_left_get(a_type, a_field, node); \ + pathp->cmp = -1; \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } \ + break; \ + } \ + } \ + } \ + assert(nodep->node == node); \ + pathp--; \ + if (pathp->node != node) { \ + /* Swap node with its successor. */ \ + bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ + rbtn_color_set(a_type, a_field, pathp->node, \ + rbtn_red_get(a_type, a_field, node)); \ + rbtn_left_set(a_type, a_field, pathp->node, \ + rbtn_left_get(a_type, a_field, node)); \ + /* If node's successor is its right child, the following code */\ + /* will do the wrong thing for the right child pointer. */\ + /* However, it doesn't matter, because the pointer will be */\ + /* properly set when the successor is pruned. */\ + rbtn_right_set(a_type, a_field, pathp->node, \ + rbtn_right_get(a_type, a_field, node)); \ + rbtn_color_set(a_type, a_field, node, tred); \ + /* The pruned leaf node's child pointers are never accessed */\ + /* again, so don't bother setting them to nil. 
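       Note that the swap is performed by relinking rather than by
       copying node contents, so outside pointers to other elements in
       the tree remain valid across removal.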
*/\ + nodep->node = pathp->node; \ + pathp->node = node; \ + if (nodep == path) { \ + rbtree->rbt_root = nodep->node; \ + } else { \ + if (nodep[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, nodep[-1].node, \ + nodep->node); \ + } else { \ + rbtn_right_set(a_type, a_field, nodep[-1].node, \ + nodep->node); \ + } \ + } \ + } else { \ + a_type *left = rbtn_left_get(a_type, a_field, node); \ if (left != NULL) { \ - /* node has no successor, but it has a left child. */\ - /* Splice node out, without losing the left child. */\ + /* node has no successor, but it has a left child. */\ + /* Splice node out, without losing the left child. */\ assert(!rbtn_red_get(a_type, a_field, node)); \ - assert(rbtn_red_get(a_type, a_field, left)); \ - rbtn_black_set(a_type, a_field, left); \ - if (pathp == path) { \ - rbtree->rbt_root = left; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - left); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - left); \ - } \ - } \ - return; \ - } else if (pathp == path) { \ - /* The tree only contained one node. */ \ + assert(rbtn_red_get(a_type, a_field, left)); \ + rbtn_black_set(a_type, a_field, left); \ + if (pathp == path) { \ + rbtree->rbt_root = left; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + left); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + left); \ + } \ + } \ + return; \ + } else if (pathp == path) { \ + /* The tree only contained one node. */ \ rbtree->rbt_root = NULL; \ - return; \ - } \ - } \ - if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - /* Prune red node, which requires no fixup. */ \ - assert(pathp[-1].cmp < 0); \ + return; \ + } \ + } \ + if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + /* Prune red node, which requires no fixup. */ \ + assert(pathp[-1].cmp < 0); \ rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ - return; \ - } \ - /* The node to be pruned is black, so unwind until balance is */\ - /* restored. */\ + return; \ + } \ + /* The node to be pruned is black, so unwind until balance is */\ + /* restored. */\ pathp->node = NULL; \ - for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ - assert(pathp->cmp != 0); \ - if (pathp->cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp->node, \ - pathp[1].node); \ - if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - a_type *right = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - a_type *rightleft = rbtn_left_get(a_type, a_field, \ - right); \ - a_type *tnode; \ + for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ + assert(pathp->cmp != 0); \ + if (pathp->cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp->node, \ + pathp[1].node); \ + if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + a_type *right = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + a_type *rightleft = rbtn_left_get(a_type, a_field, \ + right); \ + a_type *tnode; \ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ rightleft)) { \ - /* In the following diagrams, ||, //, and \\ */\ - /* indicate the path to the removed node. 
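       In these diagrams, (r) and (b) mark red and black nodes
       respectively.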
*/\ - /* */\ - /* || */\ - /* pathp(r) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - /* */\ - rbtn_black_set(a_type, a_field, pathp->node); \ - rbtn_rotate_right(a_type, a_field, right, tnode); \ - rbtn_right_set(a_type, a_field, pathp->node, tnode);\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - } else { \ - /* || */\ - /* pathp(r) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - /* */\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - } \ - /* Balance restored, but rotation modified subtree */\ - /* root. */\ - assert((uintptr_t)pathp > (uintptr_t)path); \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - return; \ - } else { \ - a_type *right = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - a_type *rightleft = rbtn_left_get(a_type, a_field, \ - right); \ + /* In the following diagrams, ||, //, and \\ */\ + /* indicate the path to the removed node. */\ + /* */\ + /* || */\ + /* pathp(r) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + /* */\ + rbtn_black_set(a_type, a_field, pathp->node); \ + rbtn_rotate_right(a_type, a_field, right, tnode); \ + rbtn_right_set(a_type, a_field, pathp->node, tnode);\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + } else { \ + /* || */\ + /* pathp(r) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + /* */\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + } \ + /* Balance restored, but rotation modified subtree */\ + /* root. */\ + assert((uintptr_t)pathp > (uintptr_t)path); \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + return; \ + } else { \ + a_type *right = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + a_type *rightleft = rbtn_left_get(a_type, a_field, \ + right); \ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ rightleft)) { \ - /* || */\ - /* pathp(b) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, rightleft); \ - rbtn_rotate_right(a_type, a_field, right, tnode); \ - rbtn_right_set(a_type, a_field, pathp->node, tnode);\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ + /* || */\ + /* pathp(b) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, rightleft); \ + rbtn_rotate_right(a_type, a_field, right, tnode); \ + rbtn_right_set(a_type, a_field, pathp->node, tnode);\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ /* subtree root, which may actually be the tree */\ - /* root. */\ - if (pathp == path) { \ - /* Set root. 
*/ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - a_type *tnode; \ - rbtn_red_set(a_type, a_field, pathp->node); \ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - pathp->node = tnode; \ - } \ - } \ - } else { \ - a_type *left; \ - rbtn_right_set(a_type, a_field, pathp->node, \ - pathp[1].node); \ - left = rbtn_left_get(a_type, a_field, pathp->node); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - a_type *tnode; \ - a_type *leftright = rbtn_right_get(a_type, a_field, \ - left); \ - a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ - leftright); \ + /* root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + a_type *tnode; \ + rbtn_red_set(a_type, a_field, pathp->node); \ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + pathp->node = tnode; \ + } \ + } \ + } else { \ + a_type *left; \ + rbtn_right_set(a_type, a_field, pathp->node, \ + pathp[1].node); \ + left = rbtn_left_get(a_type, a_field, pathp->node); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + a_type *tnode; \ + a_type *leftright = rbtn_right_get(a_type, a_field, \ + left); \ + a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ + leftright); \ if (leftrightleft != NULL && rbtn_red_get(a_type, \ a_field, leftrightleft)) { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (r) (b) */\ - /* \ */\ - /* (b) */\ - /* / */\ - /* (r) */\ - a_type *unode; \ - rbtn_black_set(a_type, a_field, leftrightleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - unode); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - rbtn_right_set(a_type, a_field, unode, tnode); \ - rbtn_rotate_left(a_type, a_field, unode, tnode); \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (r) (b) */\ - /* \ */\ - /* (b) */\ - /* / */\ - /* (b) */\ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (r) (b) */\ + /* \ */\ + /* (b) */\ + /* / */\ + /* (r) */\ + a_type *unode; \ + rbtn_black_set(a_type, a_field, leftrightleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + unode); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + rbtn_right_set(a_type, a_field, unode, tnode); \ + rbtn_rotate_left(a_type, a_field, unode, tnode); \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (r) (b) */\ + /* \ */\ + /* (b) */\ + /* / */\ + /* (b) */\ assert(leftright != NULL); \ - rbtn_red_set(a_type, a_field, leftright); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - rbtn_black_set(a_type, a_field, tnode); \ - } \ - /* Balance restored, but rotation modified subtree */\ - /* root, which may actually be the tree root. */\ - if (pathp == path) { \ - /* Set root. 
*/ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - } \ - return; \ - } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + rbtn_red_set(a_type, a_field, leftright); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + rbtn_black_set(a_type, a_field, tnode); \ + } \ + /* Balance restored, but rotation modified subtree */\ + /* root, which may actually be the tree root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + } \ + return; \ + } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ - /* || */\ - /* pathp(r) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, pathp->node); \ - rbtn_red_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root. */\ - assert((uintptr_t)pathp > (uintptr_t)path); \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(r) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - rbtn_red_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, pathp->node); \ - /* Balance restored. */ \ - return; \ - } \ - } else { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + /* || */\ + /* pathp(r) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, pathp->node); \ + rbtn_red_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root. */\ + assert((uintptr_t)pathp > (uintptr_t)path); \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(r) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + rbtn_red_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, pathp->node); \ + /* Balance restored. */ \ + return; \ + } \ + } else { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root, which may actually be the tree */\ - /* root. */\ - if (pathp == path) { \ - /* Set root. 
*/ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - rbtn_red_set(a_type, a_field, left); \ - } \ - } \ - } \ - } \ - /* Set root. */ \ - rbtree->rbt_root = path->node; \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root, which may actually be the tree */\ + /* root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + rbtn_red_set(a_type, a_field, left); \ + } \ + } \ + } \ + } \ + /* Set root. */ \ + rbtree->rbt_root = path->node; \ assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ -} \ -a_attr a_type * \ -a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ +} \ +a_attr a_type * \ +a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == NULL) { \ return NULL; \ - } else { \ - a_type *ret; \ - if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ + } else { \ + a_type *ret; \ + if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \ arg)) != NULL) { \ return ret; \ - } \ + } \ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg); \ - } \ -} \ -a_attr a_type * \ -a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - int cmp = a_cmp(start, node); \ - if (cmp < 0) { \ - a_type *ret; \ - if ((ret = a_prefix##iter_start(rbtree, start, \ + } \ +} \ +a_attr a_type * \ +a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + int cmp = a_cmp(start, node); \ + if (cmp < 0) { \ + a_type *ret; \ + if ((ret = a_prefix##iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ - } \ + } \ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg); \ - } else if (cmp > 0) { \ + } else if (cmp > 0) { \ return a_prefix##iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg); \ - } else { \ - a_type *ret; \ - if ((ret = cb(rbtree, node, arg)) != NULL) { \ + } else { \ + a_type *ret; \ + if ((ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ - } \ + } \ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg); \ - } \ -} \ -a_attr a_type * \ -a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ - a_rbt_type *, a_type *, void *), void *arg) { \ - a_type *ret; \ - if (start != NULL) { \ - ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ - 
cb, arg); \ - } else { \ - ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ - } \ + } \ +} \ +a_attr a_type * \ +a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ + a_rbt_type *, a_type *, void *), void *arg) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ + cb, arg); \ + } else { \ + ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ + } \ return ret; \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == NULL) { \ return NULL; \ - } else { \ - a_type *ret; \ - if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ + } else { \ + a_type *ret; \ + if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ - } \ + } \ return a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg); \ - } \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ - a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ - void *arg) { \ - int cmp = a_cmp(start, node); \ - if (cmp > 0) { \ - a_type *ret; \ - if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ + } \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ + a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ + void *arg) { \ + int cmp = a_cmp(start, node); \ + if (cmp > 0) { \ + a_type *ret; \ + if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ - } \ + } \ return a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg); \ - } else if (cmp < 0) { \ + } else if (cmp < 0) { \ return a_prefix##reverse_iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg); \ - } else { \ - a_type *ret; \ - if ((ret = cb(rbtree, node, arg)) != NULL) { \ + } else { \ + a_type *ret; \ + if ((ret = cb(rbtree, node, arg)) != NULL) { \ return ret; \ - } \ + } \ return a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg); \ - } \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - a_type *ret; \ - if (start != NULL) { \ - ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtree->rbt_root, cb, arg); \ - } else { \ - ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ - cb, arg); \ - } \ + } \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##reverse_iter_start(rbtree, start, \ + rbtree->rbt_root, cb, arg); \ + } else { \ + ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ + cb, arg); \ + } \ return ret; \ } \ a_attr void \ @@ -985,7 +985,7 @@ a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \ a_type *, void *), void *arg) { \ if (node == NULL) { \ return; \ - } \ + } \ a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \ node), cb, arg); \ 
rbtn_left_set(a_type, a_field, (node), NULL); \ @@ -1001,6 +1001,6 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ void *arg) { \ a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \ rbtree->rbt_root = NULL; \ -} - -#endif /* RB_H_ */ +} + +#endif /* RB_H_ */ diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/rtree.h b/contrib/libs/jemalloc/include/jemalloc/internal/rtree.h index 16ccbebee7..28d87c69cd 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/rtree.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/rtree.h @@ -7,13 +7,13 @@ #include "jemalloc/internal/sc.h" #include "jemalloc/internal/tsd.h" -/* - * This radix tree implementation is tailored to the singular purpose of +/* + * This radix tree implementation is tailored to the singular purpose of * associating metadata with extents that are currently owned by jemalloc. - * - ******************************************************************************* - */ - + * + ******************************************************************************* + */ + /* Number of high insignificant bits. */ #define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR) /* Number of low insigificant bits. */ @@ -76,7 +76,7 @@ struct rtree_level_s { unsigned cumbits; }; -typedef struct rtree_s rtree_t; +typedef struct rtree_s rtree_t; struct rtree_s { malloc_mutex_t init_lock; /* Number of elements based on rtree_levels[0].bits. */ @@ -86,13 +86,13 @@ struct rtree_s { rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; #endif }; - -/* + +/* * Split the bits into one to three partitions depending on number of * significant bits. It the number of bits does not divide evenly into the * number of levels, place one remainder bit per level starting at the leaf * level. - */ + */ static const rtree_level_t rtree_levels[] = { #if RTREE_HEIGHT == 1 {RTREE_NSB, RTREE_NHIB + RTREE_NSB} @@ -108,18 +108,18 @@ static const rtree_level_t rtree_levels[] = { # error Unsupported rtree height #endif }; - + bool rtree_new(rtree_t *rtree, bool zeroed); - + typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t); extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc; - + typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t); extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc; - + typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *); extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc; - + typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *); extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc; #ifdef JEMALLOC_JET @@ -127,7 +127,7 @@ void rtree_delete(tsdn_t *tsdn, rtree_t *rtree); #endif rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing); - + JEMALLOC_ALWAYS_INLINE uintptr_t rtree_leafkey(uintptr_t key) { unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); @@ -137,7 +137,7 @@ rtree_leafkey(uintptr_t key) { uintptr_t mask = ~((ZU(1) << maskbits) - 1); return (key & mask); } - + JEMALLOC_ALWAYS_INLINE size_t rtree_cache_direct_map(uintptr_t key) { unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); @@ -217,7 +217,7 @@ rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent ? 
ATOMIC_RELAXED : ATOMIC_ACQUIRE); return extent; -#endif +#endif } JEMALLOC_ALWAYS_INLINE szind_t @@ -229,9 +229,9 @@ rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, #else return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); -#endif +#endif } - + JEMALLOC_ALWAYS_INLINE bool rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) { @@ -242,8 +242,8 @@ rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); #endif -} - +} + static inline void rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, extent_t *extent) { @@ -255,9 +255,9 @@ rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); #else atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE); -#endif +#endif } - + static inline void rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, szind_t szind) { @@ -271,11 +271,11 @@ rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); -#else +#else atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE); -#endif +#endif } - + static inline void rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool slab) { @@ -290,7 +290,7 @@ rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE); #endif } - + static inline void rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) { @@ -339,7 +339,7 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, assert(leaf != NULL); uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); return &leaf[subkey]; - } + } /* * Search the L2 LRU cache. On hit, swap the matching element into the * slot in L1 cache, and move the position in L2 up by 1. 
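The lookup fast path above keys its cache on leaf identity rather than on full pointers: rtree_leafkey() masks off every bit that is resolved inside a leaf, so all keys mapped by the same leaf share one tag, and rtree_cache_direct_map() selects the direct-mapped L1 slot from the low bits of that tag. Below is a minimal standalone sketch of that bit arithmetic, assuming LP64 pointers, LG_VADDR = 48, LG_PAGE = 12, a height-2 tree, and 16 L1 slots; these are illustrative values only, since jemalloc derives the real ones at configure time from rtree_levels[].

/*
 * Sketch of the leaf-key masking and direct-mapped cache indexing used
 * by the lookup above.  All constants are assumptions for illustration
 * (LP64, LG_VADDR = 48, LG_PAGE = 12, height 2, 16 L1 slots).
 */
#include <stdint.h>
#include <stdio.h>

enum {
	PTRBITS = 64,		/* 1 << (LG_SIZEOF_PTR + 3) */
	NHIB = 64 - 48,		/* high insignificant bits */
	NSB = 48 - 12,		/* significant bits, split over 2 levels */
	LEAF_BITS = NSB / 2,	/* bits resolved inside one leaf: 18 */
	CUM_ABOVE_LEAF = NHIB + NSB - LEAF_BITS, /* bits before leaf: 34 */
	MASKBITS = PTRBITS - CUM_ABOVE_LEAF,	 /* 30 */
	NCACHE = 16		/* assumed L1 cache size */
};

/* Keys resolving through the same leaf share this tag (cf. rtree_leafkey). */
static uintptr_t
leafkey(uintptr_t key) {
	uintptr_t mask = ~(((uintptr_t)1 << MASKBITS) - 1);
	return key & mask;
}

/* Direct-mapped L1 slot from the low tag bits (cf. rtree_cache_direct_map). */
static size_t
cache_slot(uintptr_t key) {
	return (size_t)((key >> MASKBITS) & (NCACHE - 1));
}

/* Element index within the cached leaf (cf. rtree_subkey at the leaf level). */
static uintptr_t
subkey(uintptr_t key) {
	unsigned shift = PTRBITS - (CUM_ABOVE_LEAF + LEAF_BITS); /* 12 */
	return (key >> shift) & (((uintptr_t)1 << LEAF_BITS) - 1);
}

int
main(void) {
	uintptr_t a = (uintptr_t)0x7f00deadb000;
	uintptr_t b = a + 4096;	/* next page: same leaf, different subkey */
	printf("same leaf: %d, L1 slots %zu/%zu, subkeys %#lx/%#lx\n",
	    leafkey(a) == leafkey(b), cache_slot(a), cache_slot(b),
	    (unsigned long)subkey(a), (unsigned long)subkey(b));
	return 0;
}

With these assumed numbers one leaf resolves 2^18 page-granular keys, i.e. a 1 GiB-aligned region of address space, so consecutive same-region lookups hit the same cached leaf and skip the tree walk entirely; only on an L1 miss does the code fall through to the L2 LRU search described in the following hunk.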
@@ -377,7 +377,7 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, RTREE_CACHE_CHECK_L2(i); } #undef RTREE_CACHE_CHECK_L2 - + return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key, dependent, init_missing); } @@ -392,13 +392,13 @@ rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, key, false, true); if (elm == NULL) { return true; - } - + } + assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL); rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab); return false; -} +} JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, @@ -482,7 +482,7 @@ rtree_szind_slab_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, #else *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, true); *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, true); -#endif +#endif return true; } else { return false; @@ -506,7 +506,7 @@ rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, #endif return false; } - + static inline void rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, szind_t szind, bool slab) { diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/stats.h b/contrib/libs/jemalloc/include/jemalloc/internal/stats.h index 3b9e0eac12..7e67e4b1c8 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/stats.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/stats.h @@ -1,6 +1,6 @@ #ifndef JEMALLOC_INTERNAL_STATS_H #define JEMALLOC_INTERNAL_STATS_H - + /* OPTION(opt, var_name, default, set_value_to) */ #define STATS_PRINT_OPTIONS \ OPTION('J', json, false, true) \ @@ -12,20 +12,20 @@ OPTION('l', large, true, false) \ OPTION('x', mutex, true, false) \ OPTION('e', extents, true, false) - + enum { #define OPTION(o, v, d, s) stats_print_option_num_##v, STATS_PRINT_OPTIONS #undef OPTION stats_print_tot_num_options -}; - +}; + /* Options for stats_print. */ extern bool opt_stats_print; extern char opt_stats_print_opts[stats_print_tot_num_options+1]; - + /* Implements je_malloc_stats_print. */ void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts); - + const char *opts); + #endif /* JEMALLOC_INTERNAL_STATS_H */ diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/tsd.h b/contrib/libs/jemalloc/include/jemalloc/internal/tsd.h index fd6bd0892e..00643ff2db 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/tsd.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/tsd.h @@ -1,6 +1,6 @@ #ifndef JEMALLOC_INTERNAL_TSD_H #define JEMALLOC_INTERNAL_TSD_H - + #include "jemalloc/internal/arena_types.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/bin_types.h" @@ -12,7 +12,7 @@ #include "jemalloc/internal/tcache_structs.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/witness.h" - + /* * Thread-Specific-Data layout * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof --- @@ -58,8 +58,8 @@ typedef void (*test_callback_t)(int *); #else # define MALLOC_TEST_TSD # define MALLOC_TEST_TSD_INITIALIZER -#endif - +#endif + /* O(name, type, nullable type */ #define MALLOC_TSD \ O(tcache_enabled, bool, bool) \ @@ -114,14 +114,14 @@ void tsd_prefork(tsd_t *tsd); void tsd_postfork_parent(tsd_t *tsd); void tsd_postfork_child(tsd_t *tsd); -/* +/* * Call ..._inc when your module wants to take all threads down the slow paths, * and ..._dec when it no longer needs to. 
- */ + */ void tsd_global_slow_inc(tsdn_t *tsdn); void tsd_global_slow_dec(tsdn_t *tsdn); bool tsd_global_slow(); - + enum { /* Common case --> jnz. */ tsd_state_nominal = 0, @@ -143,7 +143,7 @@ enum { * process of being born / dying. */ tsd_state_nominal_max = 2, - + /* * A thread might free() during its death as its only allocator action; * in such scenarios, we need tsd, but set up in such a way that no @@ -174,13 +174,13 @@ enum { # define tsd_atomic_load atomic_load_u8 # define tsd_atomic_store atomic_store_u8 # define tsd_atomic_exchange atomic_exchange_u8 -#else +#else # define tsd_state_t atomic_u32_t # define tsd_atomic_load atomic_load_u32 # define tsd_atomic_store atomic_store_u32 # define tsd_atomic_exchange atomic_exchange_u32 -#endif - +#endif + /* The actual tsd. */ struct tsd_s { /* @@ -242,16 +242,16 @@ tsdn_tsd(tsdn_t *tsdn) { * header files to avoid cluttering this file. They define tsd_boot0, * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set. */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #error #include "jemalloc/internal/tsd_malloc_thread_cleanup.h" -#elif (defined(JEMALLOC_TLS)) +#elif (defined(JEMALLOC_TLS)) #include "jemalloc/internal/tsd_tls.h" -#elif (defined(_WIN32)) +#elif (defined(_WIN32)) #include "jemalloc/internal/tsd_win.h" -#else +#else #include "jemalloc/internal/tsd_generic.h" -#endif - +#endif + /* * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of * foo. This omits some safety checks, and so can be used during tsd @@ -261,7 +261,7 @@ tsdn_tsd(tsdn_t *tsdn) { JEMALLOC_ALWAYS_INLINE t * \ tsd_##n##p_get_unsafe(tsd_t *tsd) { \ return &tsd->TSD_MANGLE(n); \ -} +} MALLOC_TSD #undef O @@ -280,7 +280,7 @@ tsd_##n##p_get(tsd_t *tsd) { \ state == tsd_state_reincarnated || \ state == tsd_state_minimal_initialized); \ return tsd_##n##p_get_unsafe(tsd); \ -} +} MALLOC_TSD #undef O @@ -293,10 +293,10 @@ JEMALLOC_ALWAYS_INLINE nt * \ tsdn_##n##p_get(tsdn_t *tsdn) { \ if (tsdn_null(tsdn)) { \ return NULL; \ - } \ + } \ tsd_t *tsd = tsdn_tsd(tsdn); \ return (nt *)tsd_##n##p_get(tsd); \ -} +} MALLOC_TSD #undef O @@ -305,10 +305,10 @@ MALLOC_TSD JEMALLOC_ALWAYS_INLINE t \ tsd_##n##_get(tsd_t *tsd) { \ return *tsd_##n##p_get(tsd); \ -} +} MALLOC_TSD #undef O - + /* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */ #define O(n, t, nt) \ JEMALLOC_ALWAYS_INLINE void \ @@ -319,7 +319,7 @@ tsd_##n##_set(tsd_t *tsd, t val) { \ } MALLOC_TSD #undef O - + JEMALLOC_ALWAYS_INLINE void tsd_assert_fast(tsd_t *tsd) { /* @@ -330,21 +330,21 @@ tsd_assert_fast(tsd_t *tsd) { assert(!malloc_slow && tsd_tcache_enabled_get(tsd) && tsd_reentrancy_level_get(tsd) == 0); } - + JEMALLOC_ALWAYS_INLINE bool tsd_fast(tsd_t *tsd) { bool fast = (tsd_state_get(tsd) == tsd_state_nominal); if (fast) { tsd_assert_fast(tsd); } - + return fast; } - + JEMALLOC_ALWAYS_INLINE tsd_t * tsd_fetch_impl(bool init, bool minimal) { tsd_t *tsd = tsd_get(init); - + if (!init && tsd_get_allocates() && tsd == NULL) { return NULL; } diff --git a/contrib/libs/jemalloc/include/jemalloc/internal/util.h b/contrib/libs/jemalloc/include/jemalloc/internal/util.h index 304cb545af..93c5bca57e 100644 --- a/contrib/libs/jemalloc/include/jemalloc/internal/util.h +++ b/contrib/libs/jemalloc/include/jemalloc/internal/util.h @@ -1,8 +1,8 @@ #ifndef JEMALLOC_INTERNAL_UTIL_H #define JEMALLOC_INTERNAL_UTIL_H - + #define UTIL_INLINE static inline - + /* Junk fill patterns. 
*/ #ifndef JEMALLOC_ALLOC_JUNK # define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) @@ -10,58 +10,58 @@ #ifndef JEMALLOC_FREE_JUNK # define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) #endif - -/* - * Wrap a cpp argument that contains commas such that it isn't broken up into - * multiple arguments. - */ + +/* + * Wrap a cpp argument that contains commas such that it isn't broken up into + * multiple arguments. + */ #define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ - + /* cpp macro definition stringification. */ #define STRINGIFY_HELPER(x) #x #define STRINGIFY(x) STRINGIFY_HELPER(x) -/* - * Silence compiler warnings due to uninitialized values. This is used - * wherever the compiler fails to recognize that the variable is never used - * uninitialized. - */ +/* + * Silence compiler warnings due to uninitialized values. This is used + * wherever the compiler fails to recognize that the variable is never used + * uninitialized. + */ #define JEMALLOC_CC_SILENCE_INIT(v) = v #ifdef __GNUC__ # define likely(x) __builtin_expect(!!(x), 1) # define unlikely(x) __builtin_expect(!!(x), 0) -#else +#else # define likely(x) !!(x) # define unlikely(x) !!(x) -#endif - +#endif + #if !defined(JEMALLOC_INTERNAL_UNREACHABLE) # error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure -#endif - +#endif + #define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() - + /* Set error code. */ UTIL_INLINE void set_errno(int errnum) { -#ifdef _WIN32 - SetLastError(errnum); -#else - errno = errnum; -#endif -} - +#ifdef _WIN32 + SetLastError(errnum); +#else + errno = errnum; +#endif +} + /* Get last error code. */ UTIL_INLINE int get_errno(void) { -#ifdef _WIN32 +#ifdef _WIN32 return GetLastError(); -#else +#else return errno; -#endif -} - +#endif +} + #undef UTIL_INLINE #endif /* JEMALLOC_INTERNAL_UTIL_H */ diff --git a/contrib/libs/jemalloc/include/jemalloc/jemalloc.h b/contrib/libs/jemalloc/include/jemalloc/jemalloc.h index c9694d1ddb..c2d4c56204 100644 --- a/contrib/libs/jemalloc/include/jemalloc/jemalloc.h +++ b/contrib/libs/jemalloc/include/jemalloc/jemalloc.h @@ -1,9 +1,9 @@ #pragma once - + #if defined(__APPLE__) # include "jemalloc-osx.h" #elif defined(_MSC_VER) # include "jemalloc-win.h" -#else +#else # include "jemalloc-linux.h" -#endif +#endif diff --git a/contrib/libs/jemalloc/include/msvc_compat/strings.h b/contrib/libs/jemalloc/include/msvc_compat/strings.h index 996f256ce8..b92cd948b3 100644 --- a/contrib/libs/jemalloc/include/msvc_compat/strings.h +++ b/contrib/libs/jemalloc/include/msvc_compat/strings.h @@ -1,24 +1,24 @@ -#ifndef strings_h -#define strings_h - -/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided - * for both */ +#ifndef strings_h +#define strings_h + +/* MSVC doesn't define ffs/ffsl. 
This dummy strings.h header is provided + * for both */ #ifdef _MSC_VER # include <intrin.h> # pragma intrinsic(_BitScanForward) static __forceinline int ffsl(long x) { - unsigned long i; - + unsigned long i; + if (_BitScanForward(&i, x)) { return i + 1; } return 0; -} - +} + static __forceinline int ffs(int x) { return ffsl(x); } - + # ifdef _M_X64 # pragma intrinsic(_BitScanForward64) # endif @@ -47,12 +47,12 @@ static __forceinline int ffsll(unsigned __int64 x) { } return 0; #endif -} - +} + #else # define ffsll(x) __builtin_ffsll(x) # define ffsl(x) __builtin_ffsl(x) # define ffs(x) __builtin_ffs(x) -#endif +#endif #endif /* strings_h */ diff --git a/contrib/libs/jemalloc/src/arena.c b/contrib/libs/jemalloc/src/arena.c index ba50e41033..796523c928 100644 --- a/contrib/libs/jemalloc/src/arena.c +++ b/contrib/libs/jemalloc/src/arena.c @@ -1,7 +1,7 @@ #define JEMALLOC_ARENA_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" - + #include "jemalloc/internal/assert.h" #include "jemalloc/internal/div.h" #include "jemalloc/internal/extent_dss.h" @@ -13,9 +13,9 @@ JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS -/******************************************************************************/ -/* Data. */ - +/******************************************************************************/ +/* Data. */ + /* * Define names for both unininitialized and initialized phases, so that * options and mallctl processing are straightforward. @@ -28,7 +28,7 @@ const char *percpu_arena_mode_names[] = { "phycpu" }; percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT; - + ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT; ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT; @@ -40,20 +40,20 @@ const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = { h, SMOOTHSTEP #undef STEP -}; - +}; + static div_info_t arena_binind_div_info[SC_NBINS]; size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT; size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT; static unsigned huge_arena_ind; -/******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition. - */ - +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. 
+ */ + static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max, bool is_background_thread); @@ -63,9 +63,9 @@ static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin); static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin); - -/******************************************************************************/ - + +/******************************************************************************/ + void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, @@ -78,7 +78,7 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, *ndirty += extents_npages_get(&arena->extents_dirty); *nmuzzy += extents_npages_get(&arena->extents_muzzy); } - + void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, @@ -86,25 +86,25 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, bin_stats_t *bstats, arena_stats_large_t *lstats, arena_stats_extents_t *estats) { cassert(config_stats); - + arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms, muzzy_decay_ms, nactive, ndirty, nmuzzy); - + size_t base_allocated, base_resident, base_mapped, metadata_thp; base_stats_get(tsdn, arena->base, &base_allocated, &base_resident, &base_mapped, &metadata_thp); - + arena_stats_lock(tsdn, &arena->stats); - + arena_stats_accum_zu(&astats->mapped, base_mapped + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped)); arena_stats_accum_zu(&astats->retained, extents_npages_get(&arena->extents_retained) << LG_PAGE); - + atomic_store_zu(&astats->extent_avail, atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED), ATOMIC_RELAXED); - + arena_stats_accum_u64(&astats->decay_dirty.npurge, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_dirty.npurge)); @@ -114,7 +114,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, arena_stats_accum_u64(&astats->decay_dirty.purged, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_dirty.purged)); - + arena_stats_accum_u64(&astats->decay_muzzy.npurge, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_muzzy.npurge)); @@ -124,7 +124,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, arena_stats_accum_u64(&astats->decay_muzzy.purged, arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.decay_muzzy.purged)); - + arena_stats_accum_zu(&astats->base, base_allocated); arena_stats_accum_zu(&astats->internal, arena_internal_get(arena)); arena_stats_accum_zu(&astats->metadata_thp, metadata_thp); @@ -134,29 +134,29 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, extents_npages_get(&arena->extents_muzzy)) << LG_PAGE))); arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu( &arena->stats.abandoned_vm, ATOMIC_RELAXED)); - + for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) { uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.lstats[i].nmalloc); arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc); arena_stats_accum_u64(&astats->nmalloc_large, nmalloc); - + uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.lstats[i].ndalloc); arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc); arena_stats_accum_u64(&astats->ndalloc_large, ndalloc); - + uint64_t nrequests = 
arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.lstats[i].nrequests); arena_stats_accum_u64(&lstats[i].nrequests, nmalloc + nrequests); arena_stats_accum_u64(&astats->nrequests_large, nmalloc + nrequests); - + /* nfill == nmalloc for large currently. */ arena_stats_accum_u64(&lstats[i].nfills, nmalloc); arena_stats_accum_u64(&astats->nfills_large, nmalloc); - + uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats, &arena->stats.lstats[i].nflushes); arena_stats_accum_u64(&lstats[i].nflushes, nflush); @@ -168,7 +168,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, lstats[i].curlextents += curlextents; arena_stats_accum_zu(&astats->allocated_large, curlextents * sz_index2size(SC_NBINS + i)); - } + } for (pszind_t i = 0; i < SC_NPSIZES; i++) { size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes, @@ -204,24 +204,24 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, cache_bin_t *tbin = &descriptor->bins_small[i]; arena_stats_accum_zu(&astats->tcache_bytes, tbin->ncached * sz_index2size(i)); - } + } for (; i < nhbins; i++) { cache_bin_t *tbin = &descriptor->bins_large[i]; arena_stats_accum_zu(&astats->tcache_bytes, tbin->ncached * sz_index2size(i)); } - } + } malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[arena_prof_mutex_tcache_list], &arena->tcache_ql_mtx); malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); - + #define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \ malloc_mutex_lock(tsdn, &arena->mtx); \ malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \ &arena->mtx); \ malloc_mutex_unlock(tsdn, &arena->mtx); - + /* Gather per arena mutex profiling data. */ READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large); READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx, @@ -239,7 +239,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, READ_ARENA_MUTEX_PROF_DATA(base->mtx, arena_prof_mutex_base) #undef READ_ARENA_MUTEX_PROF_DATA - + nstime_copy(&astats->uptime, &arena->create_time); nstime_update(&astats->uptime); nstime_subtract(&astats->uptime, &arena->create_time); @@ -249,30 +249,30 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, bin_stats_merge(tsdn, &bstats[i], &arena->bins[i].bin_shards[j]); } - } -} - + } +} + void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - + extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty, extent); if (arena_dirty_decay_ms_get(arena) == 0) { arena_decay_dirty(tsdn, arena, false, true); } else { arena_background_thread_inactivity_check(tsdn, arena, false); - } -} - + } +} + static void * arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) { void *ret; arena_slab_data_t *slab_data = extent_slab_data_get(slab); size_t regind; - + assert(extent_nfree_get(slab) > 0); assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); @@ -281,16 +281,16 @@ arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) { (uintptr_t)(bin_info->reg_size * regind)); extent_nfree_dec(slab); return ret; -} - -static void +} + +static void arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info, unsigned cnt, void** ptrs) { arena_slab_data_t *slab_data = extent_slab_data_get(slab); - + assert(extent_nfree_get(slab) >= cnt); assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); - + #if (! 
defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE) for (unsigned i = 0; i < cnt; i++) { size_t regind = bitmap_sfu(slab_data->bitmap, @@ -311,7 +311,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info, if (pop > (cnt - i)) { pop = cnt - i; } - + /* * Load from memory locations only once, outside the * hot loop below. @@ -322,116 +322,116 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info, size_t bit = cfs_lu(&g); size_t regind = shift + bit; *(ptrs + i) = (void *)(base + regsize * regind); - + i++; } slab_data->bitmap[group] = g; - } + } #endif extent_nfree_sub(slab, cnt); -} - +} + #ifndef JEMALLOC_JET static #endif size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) { size_t diff, regind; - + /* Freeing a pointer outside the slab can cause assertion failure. */ assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab)); assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab)); /* Freeing an interior pointer can cause assertion failure. */ assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) % (uintptr_t)bin_infos[binind].reg_size == 0); - + diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)); - + /* Avoid doing division with a variable divisor. */ regind = div_compute(&arena_binind_div_info[binind], diff); - + assert(regind < bin_infos[binind].nregs); - + return regind; -} - +} + static void arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) { szind_t binind = extent_szind_get(slab); const bin_info_t *bin_info = &bin_infos[binind]; size_t regind = arena_slab_regind(slab, binind, ptr); - + assert(extent_nfree_get(slab) < bin_info->nregs); /* Freeing an unallocated pointer can cause assertion failure. */ assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind)); - + bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind); extent_nfree_inc(slab); -} - +} + static void arena_nactive_add(arena_t *arena, size_t add_pages) { atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED); -} - +} + static void arena_nactive_sub(arena_t *arena, size_t sub_pages) { assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages); atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED); -} - +} + static void arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { szind_t index, hindex; - + cassert(config_stats); - + if (usize < SC_LARGE_MINCLASS) { usize = SC_LARGE_MINCLASS; } index = sz_size2index(usize); hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0; - + arena_stats_add_u64(tsdn, &arena->stats, &arena->stats.lstats[hindex].nmalloc, 1); -} - -static void +} + +static void arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { szind_t index, hindex; - + cassert(config_stats); if (usize < SC_LARGE_MINCLASS) { usize = SC_LARGE_MINCLASS; - } + } index = sz_size2index(usize); hindex = (index >= SC_NBINS) ? 
index - SC_NBINS : 0; arena_stats_add_u64(tsdn, &arena->stats, &arena->stats.lstats[hindex].ndalloc, 1); -} - -static void +} + +static void arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize, size_t usize) { arena_large_dalloc_stats_update(tsdn, arena, oldusize); arena_large_malloc_stats_update(tsdn, arena, usize); } - + static bool arena_may_have_muzzy(arena_t *arena) { return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0)); -} - +} + extent_t * arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool *zero) { extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; - + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - + szind_t szind = sz_size2index(usize); size_t mapped_add; bool commit = true; @@ -442,18 +442,18 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, extent = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment, false, szind, zero, &commit); - } + } size_t size = usize + sz_large_pad; if (extent == NULL) { extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL, usize, sz_large_pad, alignment, false, szind, zero, &commit); if (config_stats) { - /* + /* * extent may be NULL on OOM, but in that case * mapped_add isn't used below, so there's no need to * conditionlly set it to 0 here. - */ + */ mapped_add = size; } } else if (config_stats) { @@ -467,15 +467,15 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, if (mapped_add != 0) { arena_stats_add_zu(tsdn, &arena->stats, &arena->stats.mapped, mapped_add); - } + } arena_stats_unlock(tsdn, &arena->stats); - } + } arena_nactive_add(arena, size >> LG_PAGE); - } - + } + return extent; -} - +} + void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { if (config_stats) { @@ -485,28 +485,28 @@ arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { arena_stats_unlock(tsdn, &arena->stats); } arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE); -} - +} + void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldusize) { size_t usize = extent_usize_get(extent); size_t udiff = oldusize - usize; - + if (config_stats) { arena_stats_lock(tsdn, &arena->stats); arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); arena_stats_unlock(tsdn, &arena->stats); } arena_nactive_sub(arena, udiff >> LG_PAGE); -} - +} + void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t oldusize) { size_t usize = extent_usize_get(extent); size_t udiff = usize - oldusize; - + if (config_stats) { arena_stats_lock(tsdn, &arena->stats); arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); @@ -514,23 +514,23 @@ arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, } arena_nactive_add(arena, udiff >> LG_PAGE); } - + static ssize_t arena_decay_ms_read(arena_decay_t *decay) { return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); } - + static void arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) { atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED); } - + static void arena_decay_deadline_init(arena_decay_t *decay) { - /* + /* * Generate a new deadline that is uniformly random within the next * epoch after the current one. 
- */ + */ nstime_copy(&decay->deadline, &decay->epoch); nstime_add(&decay->deadline, &decay->interval); if (arena_decay_ms_read(decay) > 0) { @@ -539,20 +539,20 @@ arena_decay_deadline_init(arena_decay_t *decay) { nstime_init(&jitter, prng_range_u64(&decay->jitter_state, nstime_ns(&decay->interval))); nstime_add(&decay->deadline, &jitter); - } -} - + } +} + static bool arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) { return (nstime_compare(&decay->deadline, time) <= 0); } - + static size_t arena_decay_backlog_npages_limit(const arena_decay_t *decay) { uint64_t sum; size_t npages_limit_backlog; unsigned i; - + /* * For each element of decay_backlog, multiply by the corresponding * fixed-point smoothstep decay factor. Sum the products, then divide @@ -563,16 +563,16 @@ arena_decay_backlog_npages_limit(const arena_decay_t *decay) { sum += decay->backlog[i] * h_steps[i]; } npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); - + return npages_limit_backlog; -} - +} + static void arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) { size_t npages_delta = (current_npages > decay->nunpurged) ? current_npages - decay->nunpurged : 0; decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta; - + if (config_debug) { if (current_npages > decay->ceil_npages) { decay->ceil_npages = current_npages; @@ -584,7 +584,7 @@ arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) { } } } - + static void arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64, size_t current_npages) { @@ -593,20 +593,20 @@ arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64, sizeof(size_t)); } else { size_t nadvance_z = (size_t)nadvance_u64; - + assert((uint64_t)nadvance_z == nadvance_u64); - + memmove(decay->backlog, &decay->backlog[nadvance_z], (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); if (nadvance_z > 1) { memset(&decay->backlog[SMOOTHSTEP_NSTEPS - nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); - } - } - + } + } + arena_decay_backlog_update_last(decay, current_npages); -} - +} + static void arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, size_t current_npages, size_t npages_limit, @@ -615,21 +615,21 @@ arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, arena_decay_to_limit(tsdn, arena, decay, extents, false, npages_limit, current_npages - npages_limit, is_background_thread); - } -} - -static void + } +} + +static void arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time, size_t current_npages) { assert(arena_decay_deadline_reached(decay, time)); - + nstime_t delta; nstime_copy(&delta, time); nstime_subtract(&delta, &decay->epoch); - + uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval); assert(nadvance_u64 > 0); - + /* Add nadvance_u64 decay intervals to epoch. */ nstime_copy(&delta, &decay->interval); nstime_imultiply(&delta, nadvance_u64); @@ -640,25 +640,25 @@ arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time, /* Update the backlog. 
*/ arena_decay_backlog_update(decay, nadvance_u64, current_npages); -} - +} + static void arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, const nstime_t *time, bool is_background_thread) { size_t current_npages = extents_npages_get(extents); arena_decay_epoch_advance_helper(decay, time, current_npages); - + size_t npages_limit = arena_decay_backlog_npages_limit(decay); /* We may unlock decay->mtx when try_purge(). Finish logging first. */ decay->nunpurged = (npages_limit > current_npages) ? npages_limit : current_npages; - + if (!background_thread_enabled() || is_background_thread) { arena_decay_try_purge(tsdn, arena, decay, extents, current_npages, npages_limit, is_background_thread); - } -} - + } +} + static void arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) { arena_decay_ms_write(decay, decay_ms); @@ -667,7 +667,7 @@ arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) { KQU(1000000)); nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS); } - + nstime_init(&decay->epoch, 0); nstime_update(&decay->epoch); decay->jitter_state = (uint64_t)(uintptr_t)decay; @@ -675,7 +675,7 @@ arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) { decay->nunpurged = 0; memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); } - + static bool arena_decay_init(arena_decay_t *decay, ssize_t decay_ms, arena_stats_decay_t *stats) { @@ -684,7 +684,7 @@ arena_decay_init(arena_decay_t *decay, ssize_t decay_ms, assert(((char *)decay)[i] == 0); } decay->ceil_npages = 0; - } + } if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY, malloc_mutex_rank_exclusive)) { return true; @@ -696,25 +696,25 @@ arena_decay_init(arena_decay_t *decay, ssize_t decay_ms, decay->stats = stats; } return false; -} - +} + static bool arena_decay_ms_valid(ssize_t decay_ms) { if (decay_ms < -1) { return false; - } + } if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX * KQU(1000)) { return true; } return false; -} - +} + static bool arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, bool is_background_thread) { malloc_mutex_assert_owner(tsdn, &decay->mtx); - + /* Purge all or nothing if the option is disabled. */ ssize_t decay_ms = arena_decay_ms_read(decay); if (decay_ms <= 0) { @@ -725,7 +725,7 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, } return false; } - + nstime_t time; nstime_init(&time, 0); nstime_update(&time); @@ -746,14 +746,14 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, /* Verify that time does not go backwards. */ assert(nstime_compare(&decay->epoch, &time) <= 0); } - - /* + + /* * If the deadline has been reached, advance to the current epoch and * purge to the new limit if necessary. Note that dirty pages created * during the current epoch are not subject to purge until a future * epoch, so as a result purging only happens during epoch advances, or * being triggered by background threads (scheduled event). 
- */ + */ bool advance_epoch = arena_decay_deadline_reached(decay, &time); if (advance_epoch) { arena_decay_epoch_advance(tsdn, arena, decay, extents, &time, @@ -763,70 +763,70 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_npages_get(extents), arena_decay_backlog_npages_limit(decay), is_background_thread); - } - + } + return advance_epoch; -} - +} + static ssize_t arena_decay_ms_get(arena_decay_t *decay) { return arena_decay_ms_read(decay); } - + ssize_t arena_dirty_decay_ms_get(arena_t *arena) { return arena_decay_ms_get(&arena->decay_dirty); -} - +} + ssize_t arena_muzzy_decay_ms_get(arena_t *arena) { return arena_decay_ms_get(&arena->decay_muzzy); -} - +} + static bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) { if (!arena_decay_ms_valid(decay_ms)) { return true; } - + malloc_mutex_lock(tsdn, &decay->mtx); - /* + /* * Restart decay backlog from scratch, which may cause many dirty pages * to be immediately purged. It would conceptually be possible to map * the old backlog onto the new backlog, but there is no justification * for such complexity since decay_ms changes are intended to be * infrequent, either between the {-1, 0, >0} states, or a one-time * arbitrary change during initial arena configuration. - */ + */ arena_decay_reinit(decay, decay_ms); arena_maybe_decay(tsdn, arena, decay, extents, false); malloc_mutex_unlock(tsdn, &decay->mtx); - + return false; } - + bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms) { return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty, &arena->extents_dirty, decay_ms); } - + bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms) { return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy, &arena->extents_muzzy, decay_ms); -} - +} + static size_t arena_stash_decayed(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit, size_t npages_decay_max, extent_list_t *decay_extents) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - + /* Stash extents according to npages_limit. 
*/ size_t nstashed = 0; extent_t *extent; @@ -835,31 +835,31 @@ arena_stash_decayed(tsdn_t *tsdn, arena_t *arena, npages_limit)) != NULL) { extent_list_append(decay_extents, extent); nstashed += extent_size_get(extent) >> LG_PAGE; - } + } return nstashed; -} - -static size_t +} + +static size_t arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, bool all, extent_list_t *decay_extents, bool is_background_thread) { size_t nmadvise, nunmapped; size_t npurged; - + if (config_stats) { - nmadvise = 0; + nmadvise = 0; nunmapped = 0; } - npurged = 0; - + npurged = 0; + ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); for (extent_t *extent = extent_list_first(decay_extents); extent != NULL; extent = extent_list_first(decay_extents)) { if (config_stats) { nmadvise++; - } + } size_t npages = extent_size_get(extent) >> LG_PAGE; - npurged += npages; + npurged += npages; extent_list_remove(decay_extents, extent); switch (extents_state_get(extents)) { case extent_state_active: @@ -887,8 +887,8 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, default: not_reached(); } - } - + } + if (config_stats) { arena_stats_lock(tsdn, &arena->stats); arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge, @@ -903,8 +903,8 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, } return npurged; -} - +} + /* * npages_limit: Decay at most npages_decay_max pages without violating the * invariant: (extents_npages_get(extents) >= npages_limit). We need an upper @@ -912,25 +912,25 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, * stashed), otherwise unbounded new pages could be added to extents during the * current decay run, so that the purging thread never finishes. */ -static void +static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max, bool is_background_thread) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 1); malloc_mutex_assert_owner(tsdn, &decay->mtx); - + if (decay->purging) { return; - } + } decay->purging = true; malloc_mutex_unlock(tsdn, &decay->mtx); - + extent_hooks_t *extent_hooks = extent_hooks_get(arena); - + extent_list_t decay_extents; extent_list_init(&decay_extents); - + size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents, npages_limit, npages_decay_max, &decay_extents); if (npurge != 0) { @@ -938,12 +938,12 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, &extent_hooks, decay, extents, all, &decay_extents, is_background_thread); assert(npurged == npurge); - } - + } + malloc_mutex_lock(tsdn, &decay->mtx); decay->purging = false; -} - +} + static bool arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, extents_t *extents, bool is_background_thread, bool all) { @@ -952,15 +952,15 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, arena_decay_to_limit(tsdn, arena, decay, extents, all, 0, extents_npages_get(extents), is_background_thread); malloc_mutex_unlock(tsdn, &decay->mtx); - + return false; - } - + } + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { /* No need to wait if another thread is in progress. 
*/ return true; } - + bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, is_background_thread); size_t npages_new; @@ -969,46 +969,46 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1]; } malloc_mutex_unlock(tsdn, &decay->mtx); - + if (have_background_thread && background_thread_enabled() && epoch_advanced && !is_background_thread) { background_thread_interval_check(tsdn, arena, decay, npages_new); } - + return false; } - + static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { return arena_decay_impl(tsdn, arena, &arena->decay_dirty, &arena->extents_dirty, is_background_thread, all); } - + static bool arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { return arena_decay_impl(tsdn, arena, &arena->decay_muzzy, &arena->extents_muzzy, is_background_thread, all); -} - -void +} + +void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) { return; } arena_decay_muzzy(tsdn, arena, is_background_thread, all); -} - -static void +} + +static void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) { arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE); - + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab); } - + static void arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) { assert(extent_nfree_get(slab) > 0); @@ -1017,7 +1017,7 @@ arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) { bin->stats.nonfull_slabs++; } } - + static void arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) { extent_heap_remove(&bin->slabs_nonfull, slab); @@ -1025,20 +1025,20 @@ arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) { bin->stats.nonfull_slabs--; } } - + static extent_t * arena_bin_slabs_nonfull_tryget(bin_t *bin) { extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); if (slab == NULL) { return NULL; - } + } if (config_stats) { bin->stats.reslabs++; bin->stats.nonfull_slabs--; } return slab; } - + static void arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) { assert(extent_nfree_get(slab) == 0); @@ -1052,19 +1052,19 @@ arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) { } extent_list_append(&bin->slabs_full, slab); } - + static void arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) { if (arena_is_auto(arena)) { return; - } + } extent_list_remove(&bin->slabs_full, slab); -} - -static void +} + +static void arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) { extent_t *slab; - + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); if (bin->slabcur != NULL) { slab = bin->slabcur; @@ -1072,7 +1072,7 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) { malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); - } + } while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) { malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); @@ -1091,10 +1091,10 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) { } malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } - + void arena_reset(tsd_t *tsd, arena_t *arena) { - /* + /* * Locking in this function is unintuitive. 
The caller guarantees that * no concurrent operations are happening in this arena, but there are * still reasons that some locking is necessary: @@ -1106,23 +1106,23 @@ arena_reset(tsd_t *tsd, arena_t *arena) { * - mallctl("epoch", ...) may concurrently refresh stats. While * strictly speaking this is a "concurrent operation", disallowing * stats refreshes would impose an inconvenient burden. - */ - + */ + /* Large allocations. */ malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); - + for (extent_t *extent = extent_list_first(&arena->large); extent != NULL; extent = extent_list_first(&arena->large)) { void *ptr = extent_base_get(extent); size_t usize; - + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); alloc_ctx_t alloc_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind != SC_NSIZES); - + if (config_stats || (config_prof && opt_prof)) { usize = sz_index2size(alloc_ctx.szind); assert(usize == isalloc(tsd_tsdn(tsd), ptr)); @@ -1133,9 +1133,9 @@ arena_reset(tsd_t *tsd, arena_t *arena) { } large_dalloc(tsd_tsdn(tsd), extent); malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); - } + } malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); - + /* Bins. */ for (unsigned i = 0; i < SC_NBINS; i++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { @@ -1149,7 +1149,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) { static void arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { - /* + /* * Iterate over the retained extents and destroy them. This gives the * extent allocator underlying the extent hooks an opportunity to unmap * all retained memory without having to keep its own metadata @@ -1157,21 +1157,21 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { * leaked here, so best practice is to avoid dss for arenas to be * destroyed, or provide custom extent hooks that track retained * dss-based extents for later reuse. - */ + */ extent_hooks_t *extent_hooks = extent_hooks_get(arena); extent_t *extent; while ((extent = extents_evict(tsdn, arena, &extent_hooks, &arena->extents_retained, 0)) != NULL) { extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent); } -} - +} + void arena_destroy(tsd_t *tsd, arena_t *arena) { assert(base_ind_get(arena->base) >= narenas_auto); assert(arena_nthreads_get(arena, false) == 0); assert(arena_nthreads_get(arena, true) == 0); - + /* * No allocations have occurred since arena_reset() was called. * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached @@ -1179,11 +1179,11 @@ arena_destroy(tsd_t *tsd, arena_t *arena) { */ assert(extents_npages_get(&arena->extents_dirty) == 0); assert(extents_npages_get(&arena->extents_muzzy) == 0); - + /* Deallocate retained memory. */ arena_destroy_retained(tsd_tsdn(tsd), arena); - /* + /* * Remove the arena pointer from the arenas array. We rely on the fact * that there is no way for the application to get a dirty read from the * arenas array unless there is an inherent race in the application @@ -1192,45 +1192,45 @@ arena_destroy(tsd_t *tsd, arena_t *arena) { * long as we use an atomic write to update the arenas array, the * application will get a clean read any time after it synchronizes * knowledge that the arena is no longer valid. - */ + */ arena_set(base_ind_get(arena->base), NULL); - + /* * Destroy the base allocator, which manages all metadata ever mapped by * this arena. 
*/ base_delete(tsd_tsdn(tsd), arena->base); -} - +} + static extent_t * arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info, szind_t szind) { extent_t *slab; bool zero, commit; - + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - + zero = false; commit = true; slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit); - + if (config_stats && slab != NULL) { arena_stats_mapped_add(tsdn, &arena->stats, bin_info->slab_size); - } - + } + return slab; -} - +} + static extent_t * arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard, const bin_info_t *bin_info) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; szind_t szind = sz_size2index(bin_info->reg_size); bool zero = false; @@ -1242,7 +1242,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard slab = extents_alloc(tsdn, arena, &extent_hooks, &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE, true, binind, &zero, &commit); - } + } if (slab == NULL) { slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, bin_info, szind); @@ -1251,66 +1251,66 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard } } assert(extent_slab_get(slab)); - + /* Initialize slab internals. */ arena_slab_data_t *slab_data = extent_slab_data_get(slab); extent_nfree_binshard_set(slab, bin_info->nregs, binshard); bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false); - + arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE); - + return slab; -} - +} + static extent_t * arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind, unsigned binshard) { extent_t *slab; const bin_info_t *bin_info; - + /* Look for a usable slab. */ slab = arena_bin_slabs_nonfull_tryget(bin); if (slab != NULL) { return slab; - } + } /* No existing slabs have any space available. */ - + bin_info = &bin_infos[binind]; - + /* Allocate a new slab. */ malloc_mutex_unlock(tsdn, &bin->lock); - /******************************/ + /******************************/ slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info); - /********************************/ + /********************************/ malloc_mutex_lock(tsdn, &bin->lock); if (slab != NULL) { - if (config_stats) { + if (config_stats) { bin->stats.nslabs++; bin->stats.curslabs++; - } + } return slab; - } - - /* + } + + /* * arena_slab_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped bin->lock above, - * so search one more time. - */ + * sufficient memory available while this one dropped bin->lock above, + * so search one more time. + */ slab = arena_bin_slabs_nonfull_tryget(bin); if (slab != NULL) { return slab; } - + return NULL; -} - +} + /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). 
*/ -static void * +static void * arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind, unsigned binshard) { const bin_info_t *bin_info; extent_t *slab; - + bin_info = &bin_infos[binind]; if (!arena_is_auto(arena) && bin->slabcur != NULL) { arena_bin_slabs_full_insert(arena, bin, bin->slabcur); @@ -1318,10 +1318,10 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin, } slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard); if (bin->slabcur != NULL) { - /* + /* * Another thread updated slabcur while this one ran without the * bin lock in arena_bin_nonfull_slab_get(). - */ + */ if (extent_nfree_get(bin->slabcur) > 0) { void *ret = arena_slab_reg_alloc(bin->slabcur, bin_info); @@ -1345,21 +1345,21 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin, } return ret; } - + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); bin->slabcur = NULL; - } - + } + if (slab == NULL) { return NULL; } bin->slabcur = slab; - + assert(extent_nfree_get(bin->slabcur) > 0); - + return arena_slab_reg_alloc(slab, bin_info); } - + /* Choose a bin shard and return the locked bin. */ bin_t * arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind, @@ -1375,15 +1375,15 @@ arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind, malloc_mutex_lock(tsdn, &bin->lock); return bin; -} - -void +} + +void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { unsigned i, nfill, cnt; - - assert(tbin->ncached == 0); - + + assert(tbin->ncached == 0); + if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) { prof_idump(tsdn); } @@ -1391,7 +1391,7 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, unsigned binshard; bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard); - for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> + for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> tcache->lg_fill_div[binind]); i < nfill; i += cnt) { extent_t *slab; if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > @@ -1421,7 +1421,7 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, } /* Insert such that low regions get used first. 
*/ *(tbin->avail - nfill + i) = ptr; - } + } if (config_fill && unlikely(opt_junk_alloc)) { for (unsigned j = 0; j < cnt; j++) { void* ptr = *(tbin->avail - nfill + i + j); @@ -1429,110 +1429,110 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, true); } } - } - if (config_stats) { - bin->stats.nmalloc += i; - bin->stats.nrequests += tbin->tstats.nrequests; + } + if (config_stats) { + bin->stats.nmalloc += i; + bin->stats.nrequests += tbin->tstats.nrequests; bin->stats.curregs += i; - bin->stats.nfills++; - tbin->tstats.nrequests = 0; - } + bin->stats.nfills++; + tbin->tstats.nrequests = 0; + } malloc_mutex_unlock(tsdn, &bin->lock); - tbin->ncached = i; + tbin->ncached = i; arena_decay_tick(tsdn, arena); -} - -void +} + +void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) { if (!zero) { memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size); - } -} - -static void + } +} + +static void arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) { memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size); -} +} arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small = arena_dalloc_junk_small_impl; - + static void * arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { - void *ret; + void *ret; bin_t *bin; size_t usize; extent_t *slab; - + assert(binind < SC_NBINS); usize = sz_index2size(binind); unsigned binshard; bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard); - + if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) { ret = arena_slab_reg_alloc(slab, &bin_infos[binind]); } else { ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard); } - - if (ret == NULL) { + + if (ret == NULL) { malloc_mutex_unlock(tsdn, &bin->lock); return NULL; - } - - if (config_stats) { - bin->stats.nmalloc++; - bin->stats.nrequests++; + } + + if (config_stats) { + bin->stats.nmalloc++; + bin->stats.nrequests++; bin->stats.curregs++; - } + } malloc_mutex_unlock(tsdn, &bin->lock); if (config_prof && arena_prof_accum(tsdn, arena, usize)) { prof_idump(tsdn); } - + if (!zero) { - if (config_fill) { + if (config_fill) { if (unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, + arena_alloc_junk_small(ret, &bin_infos[binind], false); } else if (unlikely(opt_zero)) { memset(ret, 0, usize); } - } - } else { + } + } else { if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &bin_infos[binind], - true); - } + true); + } memset(ret, 0, usize); - } - + } + arena_decay_tick(tsdn, arena); return ret; -} - -void * +} + +void * arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero) { assert(!tsdn_null(tsdn) || arena != NULL); - + if (likely(!tsdn_null(tsdn))) { arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size); - } + } if (unlikely(arena == NULL)) { return NULL; - } - + } + if (likely(size <= SC_SMALL_MAXCLASS)) { return arena_malloc_small(tsdn, arena, ind, zero); - } + } return large_malloc(tsdn, arena, sz_index2size(ind), zero); -} - -void * +} + +void * arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { - void *ret; - + void *ret; + if (usize <= SC_SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE && (usize & PAGE_MASK) == 0))) { @@ -1548,25 +1548,25 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, } return ret; } - + void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) { cassert(config_prof); assert(ptr != NULL); assert(isalloc(tsdn, ptr) == 
SC_LARGE_MINCLASS); assert(usize <= SC_SMALL_MAXCLASS); - + if (config_opt_safety_checks) { safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS); - } - + } + rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - + extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); arena_t *arena = extent_arena_get(extent); - + szind_t szind = sz_size2index(usize); extent_szind_set(extent, szind); rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, @@ -1575,24 +1575,24 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) { prof_accum_cancel(tsdn, &arena->prof_accum, usize); assert(isalloc(tsdn, ptr) == usize); -} - +} + static size_t arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { - cassert(config_prof); - assert(ptr != NULL); - + cassert(config_prof); + assert(ptr != NULL); + extent_szind_set(extent, SC_NBINS); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, SC_NBINS, false); - + assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); return SC_LARGE_MINCLASS; -} - +} + void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path) { @@ -1618,7 +1618,7 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, } } -static void +static void arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) { /* Dissociate slab from bin. */ if (slab == bin->slabcur) { @@ -1626,7 +1626,7 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) { } else { szind_t binind = extent_szind_get(slab); const bin_info_t *bin_info = &bin_infos[binind]; - + /* * The following block's conditional is necessary because if the * slab only contains one region, then it never gets inserted @@ -1636,36 +1636,36 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) { arena_bin_slabs_full_remove(arena, bin, slab); } else { arena_bin_slabs_nonfull_remove(bin, slab); - } - } -} - -static void + } + } +} + +static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin) { assert(slab != bin->slabcur); - + malloc_mutex_unlock(tsdn, &bin->lock); - /******************************/ + /******************************/ arena_slab_dalloc(tsdn, arena, slab); /****************************/ malloc_mutex_lock(tsdn, &bin->lock); if (config_stats) { bin->stats.curslabs--; - } -} - -static void + } +} + +static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin) { assert(extent_nfree_get(slab) > 0); - - /* + + /* * Make sure that if bin->slabcur is non-NULL, it refers to the * oldest/lowest non-full slab. It is okay to NULL slabcur out rather * than proactively keeping it pointing at the oldest/lowest non-full * slab. - */ + */ if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) { /* Switch slabcur. 
*/ if (extent_nfree_get(bin->slabcur) > 0) { @@ -1680,18 +1680,18 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, } else { arena_bin_slabs_nonfull_insert(bin, slab); } -} - +} + static void arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind, extent_t *slab, void *ptr, bool junked) { arena_slab_data_t *slab_data = extent_slab_data_get(slab); const bin_info_t *bin_info = &bin_infos[binind]; - + if (!junked && config_fill && unlikely(opt_junk_free)) { - arena_dalloc_junk_small(ptr, bin_info); + arena_dalloc_junk_small(ptr, bin_info); } - + arena_slab_reg_dalloc(slab, slab_data, ptr); unsigned nfree = extent_nfree_get(slab); if (nfree == bin_info->nregs) { @@ -1701,54 +1701,54 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin, arena_bin_slabs_full_remove(arena, bin, slab); arena_bin_lower_slab(tsdn, arena, slab, bin); } - - if (config_stats) { - bin->stats.ndalloc++; + + if (config_stats) { + bin->stats.ndalloc++; bin->stats.curregs--; - } -} - -void + } +} + +void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind, extent_t *extent, void *ptr) { arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr, true); -} - +} + static void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { szind_t binind = extent_szind_get(extent); unsigned binshard = extent_binshard_get(extent); bin_t *bin = &arena->bins[binind].bin_shards[binshard]; - + malloc_mutex_lock(tsdn, &bin->lock); arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr, false); malloc_mutex_unlock(tsdn, &bin->lock); -} - +} + void arena_dalloc_small(tsdn_t *tsdn, void *ptr) { extent_t *extent = iealloc(tsdn, ptr); arena_t *arena = extent_arena_get(extent); - + arena_dalloc_bin(tsdn, arena, extent, ptr); arena_decay_tick(tsdn, arena); -} - +} + bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, bool zero, size_t *newsize) { bool ret; /* Calls with non-zero extra had to clamp extra. */ assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS); - + extent_t *extent = iealloc(tsdn, ptr); if (unlikely(size > SC_LARGE_MAXCLASS)) { ret = true; goto done; } - + size_t usize_min = sz_s2u(size); size_t usize_max = sz_s2u(size + extra); if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min @@ -1764,7 +1764,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, && (size > oldsize || usize_max < oldsize)) { ret = true; goto done; - } + } arena_decay_tick(tsdn, extent_arena_get(extent)); ret = false; @@ -1774,14 +1774,14 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, zero); } else { ret = true; - } + } done: assert(extent == iealloc(tsdn, ptr)); *newsize = extent_usize_get(extent); - + return ret; -} - +} + static void * arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { @@ -1794,8 +1794,8 @@ arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, return NULL; } return ipalloct(tsdn, usize, alignment, zero, tcache, arena); -} - +} + void * arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, @@ -1804,7 +1804,7 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) { return NULL; } - + if (likely(usize <= SC_SMALL_MAXCLASS)) { /* Try to avoid moving the allocation. 
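arena_ralloc_no_move() above succeeds in place exactly when the old and requested sizes land in compatible size classes. A toy illustration of that decision, assuming power-of-two classes only (jemalloc's real class spacing is finer, and sz_s2u() is only loosely mimicked here):

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for sz_s2u(): round a request up to its class. */
size_t
toy_s2u(size_t size) {
	size_t usize = 16;
	while (usize < size) {
		usize <<= 1;
	}
	return usize;
}

/* No move is needed when both requests round to the same usable size. */
bool
toy_ralloc_no_move(size_t oldsize, size_t newsize) {
	return toy_s2u(oldsize) == toy_s2u(newsize);
}

/* toy_ralloc_no_move(100, 120) is true: both round up to 128 bytes. */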
*/ UNUSED size_t newsize; @@ -1817,29 +1817,29 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, return ptr; } } - + if (oldsize >= SC_LARGE_MINCLASS && usize >= SC_LARGE_MINCLASS) { return large_ralloc(tsdn, arena, ptr, usize, alignment, zero, tcache, hook_args); } - /* + /* * size and oldsize are different enough that we need to move the * object. In that case, fall back to allocating new space and copying. - */ + */ void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, zero, tcache); if (ret == NULL) { return NULL; - } - + } + hook_invoke_alloc(hook_args->is_realloc ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret, hook_args->args); hook_invoke_dalloc(hook_args->is_realloc ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); - + /* * Junk/zero-filling were already done by * ipalloc()/arena_malloc(). @@ -1849,101 +1849,101 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); return ret; } - + dss_prec_t arena_dss_prec_get(arena_t *arena) { return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE); } - + bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) { if (!have_dss) { return (dss_prec != dss_prec_disabled); - } + } atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE); return false; } - + ssize_t arena_dirty_decay_ms_default_get(void) { return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED); -} - +} + bool arena_dirty_decay_ms_default_set(ssize_t decay_ms) { if (!arena_decay_ms_valid(decay_ms)) { return true; - } + } atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED); return false; -} - +} + ssize_t arena_muzzy_decay_ms_default_get(void) { return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED); } - + bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { if (!arena_decay_ms_valid(decay_ms)) { return true; - } + } atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED); return false; -} - -bool +} + +bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, size_t *new_limit) { assert(opt_retain); - + pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0); if (new_limit != NULL) { size_t limit = *new_limit; /* Grow no more than the new limit. 
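The dss_prec and decay-default accessors above are lockless: each setting is a single atomic word, published with a release store and read with an acquire (or relaxed) load. A minimal C11 sketch of the acquire/release pairing, with a hypothetical field name:

#include <stdatomic.h>

static _Atomic unsigned g_mode;

unsigned
mode_get(void) {
	/* Acquire pairs with the release store in mode_set(). */
	return atomic_load_explicit(&g_mode, memory_order_acquire);
}

void
mode_set(unsigned mode) {
	atomic_store_explicit(&g_mode, mode, memory_order_release);
}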
*/ if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) { return true; - } - } - + } + } + malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx); if (old_limit != NULL) { *old_limit = sz_pind2sz(arena->retain_grow_limit); - } + } if (new_limit != NULL) { arena->retain_grow_limit = new_ind; } malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx); - + return false; } - + unsigned arena_nthreads_get(arena_t *arena, bool internal) { return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED); -} - +} + void arena_nthreads_inc(arena_t *arena, bool internal) { atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); -} - -void +} + +void arena_nthreads_dec(arena_t *arena, bool internal) { atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); } - + size_t arena_extent_sn_next(arena_t *arena) { return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED); -} - +} + arena_t * arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; base_t *base; - unsigned i; - + unsigned i; + if (ind == 0) { base = b0get(); } else { @@ -1952,25 +1952,25 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { return NULL; } } - + unsigned nbins_total = 0; for (i = 0; i < SC_NBINS; i++) { nbins_total += bin_infos[i].n_shards; - } + } size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total; arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE); if (arena == NULL) { goto label_error; } - + atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); arena->last_thd = NULL; - + if (config_stats) { if (arena_stats_init(tsdn, &arena->stats)) { goto label_error; - } + } ql_new(&arena->tcache_ql); ql_new(&arena->cache_bin_array_descriptor_ql); @@ -1978,14 +1978,14 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) { goto label_error; } - } - + } + if (config_prof) { if (prof_accum_init(tsdn, &arena->prof_accum)) { goto label_error; } } - + if (config_cache_oblivious) { /* * A nondeterministic seed based on the address of arena reduces @@ -1997,9 +1997,9 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { atomic_store_zu(&arena->offset_state, config_debug ? 
ind : (size_t)(uintptr_t)arena, ATOMIC_RELAXED); } - + atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED); - + atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), ATOMIC_RELAXED); @@ -2009,8 +2009,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { if (malloc_mutex_init(&arena->large_mtx, "arena_large", WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { goto label_error; - } - + } + /* * Delay coalescing for dirty extents despite the disruptive effect on * memory layout for best-fit extent allocation, since cached extents @@ -2039,7 +2039,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { false)) { goto label_error; } - + if (arena_decay_init(&arena->decay_dirty, arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) { goto label_error; @@ -2048,21 +2048,21 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) { goto label_error; } - + arena->extent_grow_next = sz_psz2ind(HUGEPAGE); arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS); if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow", WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { goto label_error; } - + extent_avail_new(&arena->extent_avail); if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail", WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) { goto label_error; } - - /* Initialize bins. */ + + /* Initialize bins. */ uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t); atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE); for (i = 0; i < SC_NBINS; i++) { @@ -2075,16 +2075,16 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { goto label_error; } } - } + } assert(bin_addr == (uintptr_t)arena + arena_size); - + arena->base = base; /* Set arena before creating background threads. */ arena_set(ind, arena); - + nstime_init(&arena->create_time, 0); nstime_update(&arena->create_time); - + /* We don't support reentrancy for arena 0 bootstrapping. */ if (ind != 0) { /* @@ -2095,18 +2095,18 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { pre_reentrancy(tsdn_tsd(tsdn), arena); if (test_hooks_arena_new_hook) { test_hooks_arena_new_hook(); - } + } post_reentrancy(tsdn_tsd(tsdn)); - } - + } + return arena; label_error: if (ind != 0) { base_delete(tsdn, base); - } + } return NULL; } - + arena_t * arena_choose_huge(tsd_t *tsd) { /* huge_arena_ind can be 0 during init (will use a0). */ @@ -2122,27 +2122,27 @@ arena_choose_huge(tsd_t *tsd) { if (huge_arena == NULL) { return NULL; } - /* + /* * Purge eagerly for huge allocations, because: 1) number of * huge allocations is usually small, which means ticker based * decay is not reliable; and 2) less immediate reuse is * expected for huge allocations. - */ + */ if (arena_dirty_decay_ms_default_get() > 0) { arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0); - } + } if (arena_muzzy_decay_ms_default_get() > 0) { arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0); } } - + return huge_arena; } - + bool arena_init_huge(void) { bool huge_enabled; - + /* The threshold should be large size class. 
*/ if (opt_oversize_threshold > SC_LARGE_MAXCLASS || opt_oversize_threshold < SC_LARGE_MINCLASS) { @@ -2155,10 +2155,10 @@ arena_init_huge(void) { oversize_threshold = opt_oversize_threshold; huge_enabled = true; } - + return huge_enabled; -} - +} + bool arena_is_huge(unsigned arena_ind) { if (huge_arena_ind == 0) { @@ -2166,7 +2166,7 @@ arena_is_huge(unsigned arena_ind) { } return (arena_ind == huge_arena_ind); } - + void arena_boot(sc_data_t *sc_data) { arena_dirty_decay_ms_default_set(opt_dirty_decay_ms); @@ -2176,44 +2176,44 @@ arena_boot(sc_data_t *sc_data) { div_init(&arena_binind_div_info[i], (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta)); } -} - -void +} + +void arena_prefork0(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx); malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx); } - + void arena_prefork1(tsdn_t *tsdn, arena_t *arena) { if (config_stats) { malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx); - } + } } - + void arena_prefork2(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx); } - + void arena_prefork3(tsdn_t *tsdn, arena_t *arena) { extents_prefork(tsdn, &arena->extents_dirty); extents_prefork(tsdn, &arena->extents_muzzy); extents_prefork(tsdn, &arena->extents_retained); -} - -void +} + +void arena_prefork4(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx); } - + void arena_prefork5(tsdn_t *tsdn, arena_t *arena) { base_prefork(tsdn, arena->base); -} - -void +} + +void arena_prefork6(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->large_mtx); } @@ -2229,8 +2229,8 @@ arena_prefork7(tsdn_t *tsdn, arena_t *arena) { void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { - unsigned i; - + unsigned i; + for (i = 0; i < SC_NBINS; i++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { bin_postfork_parent(tsdn, @@ -2249,12 +2249,12 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { if (config_stats) { malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx); } -} - -void +} + +void arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { - unsigned i; - + unsigned i; + atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) { @@ -2295,4 +2295,4 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { if (config_stats) { malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx); } -} +} diff --git a/contrib/libs/jemalloc/src/base.c b/contrib/libs/jemalloc/src/base.c index f3c61661a2..dc6e9f6919 100644 --- a/contrib/libs/jemalloc/src/base.c +++ b/contrib/libs/jemalloc/src/base.c @@ -1,39 +1,39 @@ #define JEMALLOC_BASE_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" - + #include "jemalloc/internal/assert.h" #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/sz.h" -/******************************************************************************/ -/* Data. */ - +/******************************************************************************/ +/* Data. 
*/ + static base_t *b0; - + metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT; - + const char *metadata_thp_mode_names[] = { "disabled", "auto", "always" }; -/******************************************************************************/ - +/******************************************************************************/ + static inline bool metadata_thp_madvise(void) { return (metadata_thp_enabled() && (init_system_thp_mode == thp_mode_default)); } - + static void * base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) { void *addr; bool zero = true; bool commit = true; - + /* Use huge page sizes and alignment regardless of opt_metadata_thp. */ assert(size == HUGEPAGE_CEILING(size)); size_t alignment = HUGEPAGE; @@ -47,10 +47,10 @@ base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) &zero, &commit, ind); post_reentrancy(tsd); } - + return addr; } - + static void base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, size_t size) { @@ -113,8 +113,8 @@ label_done: (size & HUGEPAGE_MASK) == 0); pages_nohuge(addr, size); } -} - +} + static void base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr, size_t size) { @@ -179,11 +179,11 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) { static void * base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size, size_t alignment) { - void *ret; - + void *ret; + assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM)); assert(size == ALIGNMENT_CEILING(size, alignment)); - + *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent), alignment) - (uintptr_t)extent_addr_get(extent); ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size); @@ -224,10 +224,10 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size, - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >> LG_HUGEPAGE; assert(base->mapped >= base->n_thp << LG_HUGEPAGE); - } - } + } + } } - + static void * base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size, size_t alignment) { @@ -237,8 +237,8 @@ base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size, ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment); base_extent_bump_alloc_post(base, extent, gap_size, ret, size); return ret; -} - +} + /* * Allocate a block of virtual memory that is large enough to start with a * base_block_t header, followed by an object of specified size and alignment. @@ -272,7 +272,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, if (block == NULL) { return NULL; } - + if (metadata_thp_madvise()) { void *addr = (void *)block; assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 && @@ -290,7 +290,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, malloc_mutex_unlock(tsdn, &base->mtx); } } - + *pind_last = sz_psz2ind(block_size); block->size = block_size; block->next = NULL; @@ -298,8 +298,8 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, base_extent_init(extent_sn_next, &block->extent, (void *)((uintptr_t)block + header_size), block_size - header_size); return block; -} - +} + /* * Allocate an extent that is at least as large as specified size, with * specified alignment. 
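base_extent_bump_alloc_helper() above computes the gap between the current extent address and the next suitably aligned address, then bumps past it. A small self-contained sketch of that arithmetic (hypothetical names; alignment must be a power of two):

#include <stddef.h>
#include <stdint.h>

#define ALIGN_CEIL(p, a) (((p) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

/* Return an aligned address and advance *cursor past the allocation. */
uintptr_t
bump_alloc(uintptr_t *cursor, size_t size, size_t alignment) {
	uintptr_t gap = ALIGN_CEIL(*cursor, (uintptr_t)alignment) - *cursor;
	uintptr_t ret = *cursor + gap;
	*cursor = ret + size;
	return ret;
}

/* E.g. with *cursor == 0x1004 and alignment == 0x40: gap == 0x3c and
 * the returned address is 0x1040. */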
@@ -307,7 +307,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, static extent_t * base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { malloc_mutex_assert_owner(tsdn, &base->mtx); - + extent_hooks_t *extent_hooks = base_extent_hooks_get(base); /* * Drop mutex during base_block_alloc(), because an extent hook will be @@ -320,7 +320,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { malloc_mutex_lock(tsdn, &base->mtx); if (block == NULL) { return NULL; - } + } block->next = base->blocks; base->blocks = block; if (config_stats) { @@ -340,12 +340,12 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { } return &block->extent; } - + base_t * b0get(void) { return b0; -} - +} + base_t * base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { pszind_t pind_last = 0; @@ -392,7 +392,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { return base; } -void +void base_delete(tsdn_t *tsdn, base_t *base) { extent_hooks_t *extent_hooks = base_extent_hooks_get(base); base_block_t *next = base->blocks; @@ -403,27 +403,27 @@ base_delete(tsdn_t *tsdn, base_t *base) { block->size); } while (next != NULL); } - + extent_hooks_t * base_extent_hooks_get(base_t *base) { return (extent_hooks_t *)atomic_load_p(&base->extent_hooks, ATOMIC_ACQUIRE); -} - +} + extent_hooks_t * base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) { extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base); atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE); return old_extent_hooks; } - + static void * base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment, size_t *esn) { alignment = QUANTUM_CEILING(alignment); size_t usize = ALIGNMENT_CEILING(size, alignment); size_t asize = usize + alignment - QUANTUM; - + extent_t *extent = NULL; malloc_mutex_lock(tsdn, &base->mtx); for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) { @@ -450,8 +450,8 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment, label_return: malloc_mutex_unlock(tsdn, &base->mtx); return ret; -} - +} + /* * base_alloc() returns zeroed memory, which is always demand-zeroed for the * auto arenas, in order to make multi-page sparse data structures such as radix @@ -477,11 +477,11 @@ base_alloc_extent(tsdn_t *tsdn, base_t *base) { return extent; } -void +void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident, size_t *mapped, size_t *n_thp) { cassert(config_stats); - + malloc_mutex_lock(tsdn, &base->mtx); assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); @@ -490,25 +490,25 @@ base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident, *mapped = base->mapped; *n_thp = base->n_thp; malloc_mutex_unlock(tsdn, &base->mtx); -} - -void +} + +void base_prefork(tsdn_t *tsdn, base_t *base) { malloc_mutex_prefork(tsdn, &base->mtx); } - + void base_postfork_parent(tsdn_t *tsdn, base_t *base) { malloc_mutex_postfork_parent(tsdn, &base->mtx); -} - -void +} + +void base_postfork_child(tsdn_t *tsdn, base_t *base) { malloc_mutex_postfork_child(tsdn, &base->mtx); } - + bool base_boot(tsdn_t *tsdn) { b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); return (b0 == NULL); -} +} diff --git a/contrib/libs/jemalloc/src/bitmap.c b/contrib/libs/jemalloc/src/bitmap.c index 468b3178eb..1981e8ea17 100644 --- a/contrib/libs/jemalloc/src/bitmap.c +++ 
b/contrib/libs/jemalloc/src/bitmap.c @@ -1,55 +1,55 @@ #define JEMALLOC_BITMAP_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" - + #include "jemalloc/internal/assert.h" - -/******************************************************************************/ - + +/******************************************************************************/ + #ifdef BITMAP_USE_TREE - -void + +void bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { - unsigned i; - size_t group_count; - - assert(nbits > 0); - assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); - - /* - * Compute the number of groups necessary to store nbits bits, and - * progressively work upward through the levels until reaching a level - * that requires only one group. - */ - binfo->levels[0].group_offset = 0; + unsigned i; + size_t group_count; + + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + /* + * Compute the number of groups necessary to store nbits bits, and + * progressively work upward through the levels until reaching a level + * that requires only one group. + */ + binfo->levels[0].group_offset = 0; group_count = BITMAP_BITS2GROUPS(nbits); - for (i = 1; group_count > 1; i++) { - assert(i < BITMAP_MAX_LEVELS); - binfo->levels[i].group_offset = binfo->levels[i-1].group_offset - + group_count; + for (i = 1; group_count > 1; i++) { + assert(i < BITMAP_MAX_LEVELS); + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; group_count = BITMAP_BITS2GROUPS(group_count); - } - binfo->levels[i].group_offset = binfo->levels[i-1].group_offset - + group_count; + } + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX); - binfo->nlevels = i; - binfo->nbits = nbits; -} - + binfo->nlevels = i; + binfo->nbits = nbits; +} + static size_t bitmap_info_ngroups(const bitmap_info_t *binfo) { return binfo->levels[binfo->nlevels].group_offset; -} - -void +} + +void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { - size_t extra; - unsigned i; - - /* - * Bits are actually inverted with regard to the external bitmap + size_t extra; + unsigned i; + + /* + * Bits are actually inverted with regard to the external bitmap * interface. - */ + */ if (fill) { /* The "filled" bitmap starts out with all 0 bits. */ @@ -64,21 +64,21 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { * significant bits of the last group. 
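bitmap_info_init() above sizes one group array per tree level: each level summarizes the groups of the level below until a single group remains. A worked illustration, assuming 64-bit groups (the actual group width depends on the platform word size):

#include <stddef.h>
#include <stdio.h>

#define BITS2GROUPS(n) (((n) + 63) / 64)

int
main(void) {
	size_t nbits = 10000;
	/* Prints 157, 3, 1: three levels suffice for 10000 bits. */
	for (size_t g = BITS2GROUPS(nbits); ; g = BITS2GROUPS(g)) {
		printf("%zu\n", g);
		if (g == 1) {
			break;
		}
	}
	return 0;
}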
*/ memset(bitmap, 0xffU, bitmap_size(binfo)); - extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) - & BITMAP_GROUP_NBITS_MASK; + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; if (extra != 0) { - bitmap[binfo->levels[1].group_offset - 1] >>= extra; + bitmap[binfo->levels[1].group_offset - 1] >>= extra; } - for (i = 1; i < binfo->nlevels; i++) { - size_t group_count = binfo->levels[i].group_offset - - binfo->levels[i-1].group_offset; - extra = (BITMAP_GROUP_NBITS - (group_count & - BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; + for (i = 1; i < binfo->nlevels; i++) { + size_t group_count = binfo->levels[i].group_offset - + binfo->levels[i-1].group_offset; + extra = (BITMAP_GROUP_NBITS - (group_count & + BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; if (extra != 0) { - bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; + bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; } - } -} + } +} #else /* BITMAP_USE_TREE */ diff --git a/contrib/libs/jemalloc/src/ckh.c b/contrib/libs/jemalloc/src/ckh.c index 1bf6df5a11..9e48a2e0a2 100644 --- a/contrib/libs/jemalloc/src/ckh.c +++ b/contrib/libs/jemalloc/src/ckh.c @@ -1,42 +1,42 @@ -/* - ******************************************************************************* - * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each - * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash - * functions are employed. The original cuckoo hashing algorithm was described - * in: - * - * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms - * 51(2):122-144. - * - * Generalization of cuckoo hashing was discussed in: - * - * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical - * alternative to traditional hash tables. In Proceedings of the 7th - * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA, - * January 2006. - * - * This implementation uses precisely two hash functions because that is the - * fewest that can work, and supporting multiple hashes is an implementation - * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006) - * that shows approximate expected maximum load factors for various - * configurations: - * - * | #cells/bucket | - * #hashes | 1 | 2 | 4 | 8 | - * --------+-------+-------+-------+-------+ - * 1 | 0.006 | 0.006 | 0.03 | 0.12 | - * 2 | 0.49 | 0.86 |>0.93< |>0.96< | - * 3 | 0.91 | 0.97 | 0.98 | 0.999 | - * 4 | 0.97 | 0.99 | 0.999 | | - * - * The number of cells per bucket is chosen such that a bucket fits in one cache - * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing, - * respectively. - * - ******************************************************************************/ +/* + ******************************************************************************* + * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each + * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash + * functions are employed. The original cuckoo hashing algorithm was described + * in: + * + * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms + * 51(2):122-144. + * + * Generalization of cuckoo hashing was discussed in: + * + * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical + * alternative to traditional hash tables. In Proceedings of the 7th + * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA, + * January 2006. 
+ * + * This implementation uses precisely two hash functions because that is the + * fewest that can work, and supporting multiple hashes is an implementation + * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006) + * that shows approximate expected maximum load factors for various + * configurations: + * + * | #cells/bucket | + * #hashes | 1 | 2 | 4 | 8 | + * --------+-------+-------+-------+-------+ + * 1 | 0.006 | 0.006 | 0.03 | 0.12 | + * 2 | 0.49 | 0.86 |>0.93< |>0.96< | + * 3 | 0.91 | 0.97 | 0.98 | 0.999 | + * 4 | 0.97 | 0.99 | 0.999 | | + * + * The number of cells per bucket is chosen such that a bucket fits in one cache + * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing, + * respectively. + * + ******************************************************************************/ #define JEMALLOC_CKH_C_ #include "jemalloc/internal/jemalloc_preamble.h" - + #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/jemalloc_internal_includes.h" @@ -47,524 +47,524 @@ #include "jemalloc/internal/prng.h" #include "jemalloc/internal/util.h" -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + static bool ckh_grow(tsd_t *tsd, ckh_t *ckh); static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); - -/******************************************************************************/ - -/* - * Search bucket for key and return the cell number if found; SIZE_T_MAX - * otherwise. - */ + +/******************************************************************************/ + +/* + * Search bucket for key and return the cell number if found; SIZE_T_MAX + * otherwise. + */ static size_t ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) { - ckhc_t *cell; - unsigned i; - - for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { - cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; + ckhc_t *cell; + unsigned i; + + for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; if (cell->key != NULL && ckh->keycomp(key, cell->key)) { return (bucket << LG_CKH_BUCKET_CELLS) + i; } - } - + } + return SIZE_T_MAX; -} - -/* - * Search table for key and return cell number if found; SIZE_T_MAX otherwise. - */ +} + +/* + * Search table for key and return cell number if found; SIZE_T_MAX otherwise. + */ static size_t ckh_isearch(ckh_t *ckh, const void *key) { - size_t hashes[2], bucket, cell; - - assert(ckh != NULL); - - ckh->hash(key, hashes); - - /* Search primary bucket. */ - bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); - cell = ckh_bucket_search(ckh, bucket, key); + size_t hashes[2], bucket, cell; + + assert(ckh != NULL); + + ckh->hash(key, hashes); + + /* Search primary bucket. */ + bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); + cell = ckh_bucket_search(ckh, bucket, key); if (cell != SIZE_T_MAX) { return cell; } - - /* Search secondary bucket. */ - bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - cell = ckh_bucket_search(ckh, bucket, key); + + /* Search secondary bucket. 
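As the header comment above explains, each key has exactly two candidate buckets, so ckh_isearch() probes at most two locations (primary hash, then secondary). A reduced sketch of that lookup, assuming pointer-identity key comparison and one cell per bucket (the real table uses a keycomp callback and 2^LG_CKH_BUCKET_CELLS cells per bucket):

#include <stdbool.h>
#include <stddef.h>

typedef struct {
	const void *key;
	const void *data;
} cell_t;

/* nbuckets must be a power of two; h0/h1 are the two hashes of key. */
bool
cuckoo_lookup(const cell_t *tab, size_t nbuckets, size_t h0, size_t h1,
    const void *key, const void **data) {
	const cell_t *c = &tab[h0 & (nbuckets - 1)];
	if (c->key == key) {
		*data = c->data;
		return true;
	}
	c = &tab[h1 & (nbuckets - 1)];
	if (c->key == key) {
		*data = c->data;
		return true;
	}
	return false;
}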
*/ + bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); + cell = ckh_bucket_search(ckh, bucket, key); return cell; -} - +} + static bool -ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, +ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, const void *data) { - ckhc_t *cell; - unsigned offset, i; - - /* - * Cycle through the cells in the bucket, starting at a random position. - * The randomness avoids worst-case search overhead as buckets fill up. - */ + ckhc_t *cell; + unsigned offset, i; + + /* + * Cycle through the cells in the bucket, starting at a random position. + * The randomness avoids worst-case search overhead as buckets fill up. + */ offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, LG_CKH_BUCKET_CELLS); - for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { - cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + - ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; - if (cell->key == NULL) { - cell->key = key; - cell->data = data; - ckh->count++; + for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; + if (cell->key == NULL) { + cell->key = key; + cell->data = data; + ckh->count++; return false; - } - } - + } + } + return true; -} - -/* - * No space is available in bucket. Randomly evict an item, then try to find an - * alternate location for that item. Iteratively repeat this - * eviction/relocation procedure until either success or detection of an - * eviction/relocation bucket cycle. - */ +} + +/* + * No space is available in bucket. Randomly evict an item, then try to find an + * alternate location for that item. Iteratively repeat this + * eviction/relocation procedure until either success or detection of an + * eviction/relocation bucket cycle. + */ static bool -ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, +ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, void const **argdata) { - const void *key, *data, *tkey, *tdata; - ckhc_t *cell; - size_t hashes[2], bucket, tbucket; - unsigned i; - - bucket = argbucket; - key = *argkey; - data = *argdata; - while (true) { - /* - * Choose a random item within the bucket to evict. This is - * critical to correct function, because without (eventually) - * evicting all items within a bucket during iteration, it - * would be possible to get stuck in an infinite loop if there - * were an item for which both hashes indicated the same - * bucket. - */ + const void *key, *data, *tkey, *tdata; + ckhc_t *cell; + size_t hashes[2], bucket, tbucket; + unsigned i; + + bucket = argbucket; + key = *argkey; + data = *argdata; + while (true) { + /* + * Choose a random item within the bucket to evict. This is + * critical to correct function, because without (eventually) + * evicting all items within a bucket during iteration, it + * would be possible to get stuck in an infinite loop if there + * were an item for which both hashes indicated the same + * bucket. + */ i = (unsigned)prng_lg_range_u64(&ckh->prng_state, LG_CKH_BUCKET_CELLS); - cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; - assert(cell->key != NULL); - - /* Swap cell->{key,data} and {key,data} (evict). */ - tkey = cell->key; tdata = cell->data; - cell->key = key; cell->data = data; - key = tkey; data = tdata; - -#ifdef CKH_COUNT - ckh->nrelocs++; -#endif - - /* Find the alternate bucket for the evicted item. 
*/ - ckh->hash(key, hashes); - tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (tbucket == bucket) { - tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - - 1); - /* - * It may be that (tbucket == bucket) still, if the - * item's hashes both indicate this bucket. However, - * we are guaranteed to eventually escape this bucket - * during iteration, assuming pseudo-random item - * selection (true randomness would make infinite - * looping a remote possibility). The reason we can - * never get trapped forever is that there are two - * cases: - * - * 1) This bucket == argbucket, so we will quickly - * detect an eviction cycle and terminate. - * 2) An item was evicted to this bucket from another, - * which means that at least one item in this bucket - * has hashes that indicate distinct buckets. - */ - } - /* Check for a cycle. */ - if (tbucket == argbucket) { - *argkey = key; - *argdata = data; + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; + assert(cell->key != NULL); + + /* Swap cell->{key,data} and {key,data} (evict). */ + tkey = cell->key; tdata = cell->data; + cell->key = key; cell->data = data; + key = tkey; data = tdata; + +#ifdef CKH_COUNT + ckh->nrelocs++; +#endif + + /* Find the alternate bucket for the evicted item. */ + ckh->hash(key, hashes); + tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (tbucket == bucket) { + tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) + - 1); + /* + * It may be that (tbucket == bucket) still, if the + * item's hashes both indicate this bucket. However, + * we are guaranteed to eventually escape this bucket + * during iteration, assuming pseudo-random item + * selection (true randomness would make infinite + * looping a remote possibility). The reason we can + * never get trapped forever is that there are two + * cases: + * + * 1) This bucket == argbucket, so we will quickly + * detect an eviction cycle and terminate. + * 2) An item was evicted to this bucket from another, + * which means that at least one item in this bucket + * has hashes that indicate distinct buckets. + */ + } + /* Check for a cycle. */ + if (tbucket == argbucket) { + *argkey = key; + *argdata = data; return true; - } - - bucket = tbucket; + } + + bucket = tbucket; if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { return false; } - } -} - + } +} + static bool ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { - size_t hashes[2], bucket; - const void *key = *argkey; - const void *data = *argdata; - - ckh->hash(key, hashes); - - /* Try to insert in primary bucket. */ - bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); + size_t hashes[2], bucket; + const void *key = *argkey; + const void *data = *argdata; + + ckh->hash(key, hashes); + + /* Try to insert in primary bucket. */ + bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { return false; } - - /* Try to insert in secondary bucket. */ - bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); + + /* Try to insert in secondary bucket. */ + bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { return false; } - - /* - * Try to find a place for this item via iterative eviction/relocation. - */ + + /* + * Try to find a place for this item via iterative eviction/relocation. 
+ */ return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata); -} - -/* - * Try to rebuild the hash table from scratch by inserting all items from the - * old table into the new. - */ +} + +/* + * Try to rebuild the hash table from scratch by inserting all items from the + * old table into the new. + */ static bool ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { - size_t count, i, nins; - const void *key, *data; - - count = ckh->count; - ckh->count = 0; - for (i = nins = 0; nins < count; i++) { - if (aTab[i].key != NULL) { - key = aTab[i].key; - data = aTab[i].data; - if (ckh_try_insert(ckh, &key, &data)) { - ckh->count = count; + size_t count, i, nins; + const void *key, *data; + + count = ckh->count; + ckh->count = 0; + for (i = nins = 0; nins < count; i++) { + if (aTab[i].key != NULL) { + key = aTab[i].key; + data = aTab[i].data; + if (ckh_try_insert(ckh, &key, &data)) { + ckh->count = count; return true; - } - nins++; - } - } - + } + nins++; + } + } + return false; -} - -static bool +} + +static bool ckh_grow(tsd_t *tsd, ckh_t *ckh) { - bool ret; - ckhc_t *tab, *ttab; + bool ret; + ckhc_t *tab, *ttab; unsigned lg_prevbuckets, lg_curcells; - -#ifdef CKH_COUNT - ckh->ngrows++; -#endif - - /* - * It is possible (though unlikely, given well behaved hashes) that the - * table will have to be doubled more than once in order to create a - * usable table. - */ - lg_prevbuckets = ckh->lg_curbuckets; - lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; - while (true) { - size_t usize; - - lg_curcells++; + +#ifdef CKH_COUNT + ckh->ngrows++; +#endif + + /* + * It is possible (though unlikely, given well behaved hashes) that the + * table will have to be doubled more than once in order to create a + * usable table. + */ + lg_prevbuckets = ckh->lg_curbuckets; + lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; + while (true) { + size_t usize; + + lg_curcells++; usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { - ret = true; - goto label_return; - } + ret = true; + goto label_return; + } tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, true, arena_ichoose(tsd, NULL)); - if (tab == NULL) { - ret = true; - goto label_return; - } - /* Swap in new table. */ - ttab = ckh->tab; - ckh->tab = tab; - tab = ttab; - ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; - + if (tab == NULL) { + ret = true; + goto label_return; + } + /* Swap in new table. */ + ttab = ckh->tab; + ckh->tab = tab; + tab = ttab; + ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; + if (!ckh_rebuild(ckh, tab)) { idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); - break; - } - - /* Rebuilding failed, so back out partially rebuilt table. */ + break; + } + + /* Rebuilding failed, so back out partially rebuilt table. */ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); - ckh->tab = tab; - ckh->lg_curbuckets = lg_prevbuckets; - } - - ret = false; -label_return: + ckh->tab = tab; + ckh->lg_curbuckets = lg_prevbuckets; + } + + ret = false; +label_return: return ret; -} - -static void +} + +static void ckh_shrink(tsd_t *tsd, ckh_t *ckh) { - ckhc_t *tab, *ttab; + ckhc_t *tab, *ttab; size_t usize; unsigned lg_prevbuckets, lg_curcells; - - /* - * It is possible (though unlikely, given well behaved hashes) that the - * table rebuild will fail. 
- */ - lg_prevbuckets = ckh->lg_curbuckets; - lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; + + /* + * It is possible (though unlikely, given well behaved hashes) that the + * table rebuild will fail. + */ + lg_prevbuckets = ckh->lg_curbuckets; + lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { - return; + return; } tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, true, arena_ichoose(tsd, NULL)); - if (tab == NULL) { - /* - * An OOM error isn't worth propagating, since it doesn't - * prevent this or future operations from proceeding. - */ - return; - } - /* Swap in new table. */ - ttab = ckh->tab; - ckh->tab = tab; - tab = ttab; - ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; - + if (tab == NULL) { + /* + * An OOM error isn't worth propagating, since it doesn't + * prevent this or future operations from proceeding. + */ + return; + } + /* Swap in new table. */ + ttab = ckh->tab; + ckh->tab = tab; + tab = ttab; + ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; + if (!ckh_rebuild(ckh, tab)) { idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); -#ifdef CKH_COUNT - ckh->nshrinks++; -#endif - return; - } - - /* Rebuilding failed, so back out partially rebuilt table. */ +#ifdef CKH_COUNT + ckh->nshrinks++; +#endif + return; + } + + /* Rebuilding failed, so back out partially rebuilt table. */ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); - ckh->tab = tab; - ckh->lg_curbuckets = lg_prevbuckets; -#ifdef CKH_COUNT - ckh->nshrinkfails++; -#endif -} - -bool + ckh->tab = tab; + ckh->lg_curbuckets = lg_prevbuckets; +#ifdef CKH_COUNT + ckh->nshrinkfails++; +#endif +} + +bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) { - bool ret; - size_t mincells, usize; - unsigned lg_mincells; - - assert(minitems > 0); - assert(hash != NULL); - assert(keycomp != NULL); - -#ifdef CKH_COUNT - ckh->ngrows = 0; - ckh->nshrinks = 0; - ckh->nshrinkfails = 0; - ckh->ninserts = 0; - ckh->nrelocs = 0; -#endif - ckh->prng_state = 42; /* Value doesn't really matter. */ - ckh->count = 0; - - /* + bool ret; + size_t mincells, usize; + unsigned lg_mincells; + + assert(minitems > 0); + assert(hash != NULL); + assert(keycomp != NULL); + +#ifdef CKH_COUNT + ckh->ngrows = 0; + ckh->nshrinks = 0; + ckh->nshrinkfails = 0; + ckh->ninserts = 0; + ckh->nrelocs = 0; +#endif + ckh->prng_state = 42; /* Value doesn't really matter. */ + ckh->count = 0; + + /* * Find the minimum power of 2 that is large enough to fit minitems - * entries. We are using (2+,2) cuckoo hashing, which has an expected - * maximum load factor of at least ~0.86, so 0.75 is a conservative load + * entries. We are using (2+,2) cuckoo hashing, which has an expected + * maximum load factor of at least ~0.86, so 0.75 is a conservative load * factor that will typically allow mincells items to fit without ever - * growing the table. - */ - assert(LG_CKH_BUCKET_CELLS > 0); - mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; - for (lg_mincells = LG_CKH_BUCKET_CELLS; - (ZU(1) << lg_mincells) < mincells; + * growing the table. + */ + assert(LG_CKH_BUCKET_CELLS > 0); + mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; + for (lg_mincells = LG_CKH_BUCKET_CELLS; + (ZU(1) << lg_mincells) < mincells; lg_mincells++) { /* Do nothing. 
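The sizing arithmetic in ckh_new() above is easy to sanity-check by hand; the macro below merely restates the expression from the hunk for a worked instance:

#define MINCELLS(n) ((((n) + (3 - (n) % 3)) / 3) << 2)

/* MINCELLS(10) == ((10 + 2) / 3) << 2 == 16: ten items in sixteen
 * cells is a 0.625 load factor, under the conservative 0.75 target
 * described in the comment above. */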
*/ } - ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; - ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; - ckh->hash = hash; - ckh->keycomp = keycomp; - + ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; + ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; + ckh->hash = hash; + ckh->keycomp = keycomp; + usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { - ret = true; - goto label_return; - } + ret = true; + goto label_return; + } ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, true, arena_ichoose(tsd, NULL)); - if (ckh->tab == NULL) { - ret = true; - goto label_return; - } - - ret = false; -label_return: + if (ckh->tab == NULL) { + ret = true; + goto label_return; + } + + ret = false; +label_return: return ret; -} - -void +} + +void ckh_delete(tsd_t *tsd, ckh_t *ckh) { - assert(ckh != NULL); - -#ifdef CKH_VERBOSE - malloc_printf( + assert(ckh != NULL); + +#ifdef CKH_VERBOSE + malloc_printf( "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64"," " nshrinkfails: %"FMTu64", ninserts: %"FMTu64"," " nrelocs: %"FMTu64"\n", __func__, ckh, - (unsigned long long)ckh->ngrows, - (unsigned long long)ckh->nshrinks, - (unsigned long long)ckh->nshrinkfails, - (unsigned long long)ckh->ninserts, - (unsigned long long)ckh->nrelocs); -#endif - + (unsigned long long)ckh->ngrows, + (unsigned long long)ckh->nshrinks, + (unsigned long long)ckh->nshrinkfails, + (unsigned long long)ckh->ninserts, + (unsigned long long)ckh->nrelocs); +#endif + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); if (config_debug) { memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); } -} - -size_t +} + +size_t ckh_count(ckh_t *ckh) { - assert(ckh != NULL); - + assert(ckh != NULL); + return ckh->count; -} - -bool +} + +bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) { - size_t i, ncells; - - for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + - LG_CKH_BUCKET_CELLS)); i < ncells; i++) { - if (ckh->tab[i].key != NULL) { + size_t i, ncells; + + for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { + if (ckh->tab[i].key != NULL) { if (key != NULL) { - *key = (void *)ckh->tab[i].key; + *key = (void *)ckh->tab[i].key; } if (data != NULL) { - *data = (void *)ckh->tab[i].data; + *data = (void *)ckh->tab[i].data; } - *tabind = i + 1; + *tabind = i + 1; return false; - } - } - + } + } + return true; -} - -bool +} + +bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) { - bool ret; - - assert(ckh != NULL); - assert(ckh_search(ckh, key, NULL, NULL)); - -#ifdef CKH_COUNT - ckh->ninserts++; -#endif - - while (ckh_try_insert(ckh, &key, &data)) { + bool ret; + + assert(ckh != NULL); + assert(ckh_search(ckh, key, NULL, NULL)); + +#ifdef CKH_COUNT + ckh->ninserts++; +#endif + + while (ckh_try_insert(ckh, &key, &data)) { if (ckh_grow(tsd, ckh)) { - ret = true; - goto label_return; - } - } - - ret = false; -label_return: + ret = true; + goto label_return; + } + } + + ret = false; +label_return: return ret; -} - -bool +} + +bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data) { - size_t cell; - - assert(ckh != NULL); - - cell = ckh_isearch(ckh, searchkey); - if (cell != SIZE_T_MAX) { + size_t cell; + + assert(ckh != NULL); + + cell = ckh_isearch(ckh, searchkey); + if (cell != SIZE_T_MAX) { if (key != NULL) { - *key = (void *)ckh->tab[cell].key; + *key = (void *)ckh->tab[cell].key; } if (data != NULL) { - 
*data = (void *)ckh->tab[cell].data; + *data = (void *)ckh->tab[cell].data; } - ckh->tab[cell].key = NULL; - ckh->tab[cell].data = NULL; /* Not necessary. */ - - ckh->count--; - /* Try to halve the table if it is less than 1/4 full. */ - if (ckh->count < (ZU(1) << (ckh->lg_curbuckets - + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets - > ckh->lg_minbuckets) { - /* Ignore error due to OOM. */ + ckh->tab[cell].key = NULL; + ckh->tab[cell].data = NULL; /* Not necessary. */ + + ckh->count--; + /* Try to halve the table if it is less than 1/4 full. */ + if (ckh->count < (ZU(1) << (ckh->lg_curbuckets + + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets + > ckh->lg_minbuckets) { + /* Ignore error due to OOM. */ ckh_shrink(tsd, ckh); - } - + } + return false; - } - + } + return true; -} - -bool +} + +bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) { - size_t cell; - - assert(ckh != NULL); - - cell = ckh_isearch(ckh, searchkey); - if (cell != SIZE_T_MAX) { + size_t cell; + + assert(ckh != NULL); + + cell = ckh_isearch(ckh, searchkey); + if (cell != SIZE_T_MAX) { if (key != NULL) { - *key = (void *)ckh->tab[cell].key; + *key = (void *)ckh->tab[cell].key; } if (data != NULL) { - *data = (void *)ckh->tab[cell].data; + *data = (void *)ckh->tab[cell].data; } return false; - } - + } + return true; -} - -void +} + +void ckh_string_hash(const void *key, size_t r_hash[2]) { - hash(key, strlen((const char *)key), 0x94122f33U, r_hash); -} - -bool + hash(key, strlen((const char *)key), 0x94122f33U, r_hash); +} + +bool ckh_string_keycomp(const void *k1, const void *k2) { assert(k1 != NULL); assert(k2 != NULL); - + return !strcmp((char *)k1, (char *)k2); -} - -void +} + +void ckh_pointer_hash(const void *key, size_t r_hash[2]) { - union { - const void *v; - size_t i; - } u; - - assert(sizeof(u.v) == sizeof(u.i)); - u.v = key; - hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash); -} - -bool + union { + const void *v; + size_t i; + } u; + + assert(sizeof(u.v) == sizeof(u.i)); + u.v = key; + hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash); +} + +bool ckh_pointer_keycomp(const void *k1, const void *k2) { return (k1 == k2); -} +} diff --git a/contrib/libs/jemalloc/src/ctl.c b/contrib/libs/jemalloc/src/ctl.c index 48afaa61f4..560bff82fc 100644 --- a/contrib/libs/jemalloc/src/ctl.c +++ b/contrib/libs/jemalloc/src/ctl.c @@ -1,7 +1,7 @@ #define JEMALLOC_CTL_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" - + #include "jemalloc/internal/assert.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/extent_dss.h" @@ -11,171 +11,171 @@ #include "jemalloc/internal/sc.h" #include "jemalloc/internal/util.h" -/******************************************************************************/ -/* Data. */ - -/* - * ctl_mtx protects the following: +/******************************************************************************/ +/* Data. */ + +/* + * ctl_mtx protects the following: * - ctl_stats->* - */ -static malloc_mutex_t ctl_mtx; -static bool ctl_initialized; + */ +static malloc_mutex_t ctl_mtx; +static bool ctl_initialized; static ctl_stats_t *ctl_stats; static ctl_arenas_t *ctl_arenas; - -/******************************************************************************/ -/* Helpers for named and indexed nodes. */ - + +/******************************************************************************/ +/* Helpers for named and indexed nodes. 
*/ + static const ctl_named_node_t * ctl_named_node(const ctl_node_t *node) { - return ((node->named) ? (const ctl_named_node_t *)node : NULL); -} - + return ((node->named) ? (const ctl_named_node_t *)node : NULL); +} + static const ctl_named_node_t * ctl_named_children(const ctl_named_node_t *node, size_t index) { - const ctl_named_node_t *children = ctl_named_node(node->children); - - return (children ? &children[index] : NULL); -} - + const ctl_named_node_t *children = ctl_named_node(node->children); + + return (children ? &children[index] : NULL); +} + static const ctl_indexed_node_t * ctl_indexed_node(const ctl_node_t *node) { return (!node->named ? (const ctl_indexed_node_t *)node : NULL); -} - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - +} + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + #define CTL_PROTO(n) \ static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ void *oldp, size_t *oldlenp, void *newp, size_t newlen); - + #define INDEX_PROTO(n) \ static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ const size_t *mib, size_t miblen, size_t i); - -CTL_PROTO(version) -CTL_PROTO(epoch) + +CTL_PROTO(version) +CTL_PROTO(epoch) CTL_PROTO(background_thread) CTL_PROTO(max_background_threads) -CTL_PROTO(thread_tcache_enabled) -CTL_PROTO(thread_tcache_flush) +CTL_PROTO(thread_tcache_enabled) +CTL_PROTO(thread_tcache_flush) CTL_PROTO(thread_prof_name) CTL_PROTO(thread_prof_active) -CTL_PROTO(thread_arena) -CTL_PROTO(thread_allocated) -CTL_PROTO(thread_allocatedp) -CTL_PROTO(thread_deallocated) -CTL_PROTO(thread_deallocatedp) +CTL_PROTO(thread_arena) +CTL_PROTO(thread_allocated) +CTL_PROTO(thread_allocatedp) +CTL_PROTO(thread_deallocated) +CTL_PROTO(thread_deallocatedp) CTL_PROTO(config_cache_oblivious) -CTL_PROTO(config_debug) -CTL_PROTO(config_fill) -CTL_PROTO(config_lazy_lock) +CTL_PROTO(config_debug) +CTL_PROTO(config_fill) +CTL_PROTO(config_lazy_lock) CTL_PROTO(config_malloc_conf) CTL_PROTO(config_opt_safety_checks) -CTL_PROTO(config_prof) -CTL_PROTO(config_prof_libgcc) -CTL_PROTO(config_prof_libunwind) -CTL_PROTO(config_stats) -CTL_PROTO(config_utrace) -CTL_PROTO(config_xmalloc) -CTL_PROTO(opt_abort) +CTL_PROTO(config_prof) +CTL_PROTO(config_prof_libgcc) +CTL_PROTO(config_prof_libunwind) +CTL_PROTO(config_stats) +CTL_PROTO(config_utrace) +CTL_PROTO(config_xmalloc) +CTL_PROTO(opt_abort) CTL_PROTO(opt_abort_conf) CTL_PROTO(opt_confirm_conf) CTL_PROTO(opt_metadata_thp) CTL_PROTO(opt_retain) -CTL_PROTO(opt_dss) -CTL_PROTO(opt_narenas) +CTL_PROTO(opt_dss) +CTL_PROTO(opt_narenas) CTL_PROTO(opt_percpu_arena) CTL_PROTO(opt_oversize_threshold) CTL_PROTO(opt_background_thread) CTL_PROTO(opt_max_background_threads) CTL_PROTO(opt_dirty_decay_ms) CTL_PROTO(opt_muzzy_decay_ms) -CTL_PROTO(opt_stats_print) +CTL_PROTO(opt_stats_print) CTL_PROTO(opt_stats_print_opts) -CTL_PROTO(opt_junk) -CTL_PROTO(opt_zero) -CTL_PROTO(opt_utrace) -CTL_PROTO(opt_xmalloc) -CTL_PROTO(opt_tcache) +CTL_PROTO(opt_junk) +CTL_PROTO(opt_zero) +CTL_PROTO(opt_utrace) +CTL_PROTO(opt_xmalloc) +CTL_PROTO(opt_tcache) CTL_PROTO(opt_thp) CTL_PROTO(opt_lg_extent_max_active_fit) -CTL_PROTO(opt_lg_tcache_max) -CTL_PROTO(opt_prof) -CTL_PROTO(opt_prof_prefix) -CTL_PROTO(opt_prof_active) +CTL_PROTO(opt_lg_tcache_max) +CTL_PROTO(opt_prof) +CTL_PROTO(opt_prof_prefix) +CTL_PROTO(opt_prof_active) CTL_PROTO(opt_prof_thread_active_init) 
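A brief aside while reading the CTL_PROTO run, which continues below: each invocation is mechanical boilerplate. Expanding the CTL_PROTO macro defined just above by hand, with nothing assumed beyond that definition, CTL_PROTO(version) declares:

static int
version_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);

Every handler in this file carries that one signature, which is what lets the named/indexed node tree further down dispatch to any terminal node's ctl function uniformly.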
-CTL_PROTO(opt_lg_prof_sample) -CTL_PROTO(opt_lg_prof_interval) -CTL_PROTO(opt_prof_gdump) -CTL_PROTO(opt_prof_final) -CTL_PROTO(opt_prof_leak) -CTL_PROTO(opt_prof_accum) +CTL_PROTO(opt_lg_prof_sample) +CTL_PROTO(opt_lg_prof_interval) +CTL_PROTO(opt_prof_gdump) +CTL_PROTO(opt_prof_final) +CTL_PROTO(opt_prof_leak) +CTL_PROTO(opt_prof_accum) CTL_PROTO(tcache_create) CTL_PROTO(tcache_flush) CTL_PROTO(tcache_destroy) CTL_PROTO(arena_i_initialized) CTL_PROTO(arena_i_decay) -CTL_PROTO(arena_i_purge) +CTL_PROTO(arena_i_purge) CTL_PROTO(arena_i_reset) CTL_PROTO(arena_i_destroy) -CTL_PROTO(arena_i_dss) +CTL_PROTO(arena_i_dss) CTL_PROTO(arena_i_dirty_decay_ms) CTL_PROTO(arena_i_muzzy_decay_ms) CTL_PROTO(arena_i_extent_hooks) CTL_PROTO(arena_i_retain_grow_limit) -INDEX_PROTO(arena_i) -CTL_PROTO(arenas_bin_i_size) -CTL_PROTO(arenas_bin_i_nregs) +INDEX_PROTO(arena_i) +CTL_PROTO(arenas_bin_i_size) +CTL_PROTO(arenas_bin_i_nregs) CTL_PROTO(arenas_bin_i_slab_size) CTL_PROTO(arenas_bin_i_nshards) -INDEX_PROTO(arenas_bin_i) +INDEX_PROTO(arenas_bin_i) CTL_PROTO(arenas_lextent_i_size) INDEX_PROTO(arenas_lextent_i) -CTL_PROTO(arenas_narenas) +CTL_PROTO(arenas_narenas) CTL_PROTO(arenas_dirty_decay_ms) CTL_PROTO(arenas_muzzy_decay_ms) -CTL_PROTO(arenas_quantum) -CTL_PROTO(arenas_page) -CTL_PROTO(arenas_tcache_max) -CTL_PROTO(arenas_nbins) -CTL_PROTO(arenas_nhbins) +CTL_PROTO(arenas_quantum) +CTL_PROTO(arenas_page) +CTL_PROTO(arenas_tcache_max) +CTL_PROTO(arenas_nbins) +CTL_PROTO(arenas_nhbins) CTL_PROTO(arenas_nlextents) CTL_PROTO(arenas_create) CTL_PROTO(arenas_lookup) CTL_PROTO(prof_thread_active_init) -CTL_PROTO(prof_active) -CTL_PROTO(prof_dump) +CTL_PROTO(prof_active) +CTL_PROTO(prof_dump) CTL_PROTO(prof_gdump) CTL_PROTO(prof_reset) -CTL_PROTO(prof_interval) +CTL_PROTO(prof_interval) CTL_PROTO(lg_prof_sample) CTL_PROTO(prof_log_start) CTL_PROTO(prof_log_stop) -CTL_PROTO(stats_arenas_i_small_allocated) -CTL_PROTO(stats_arenas_i_small_nmalloc) -CTL_PROTO(stats_arenas_i_small_ndalloc) -CTL_PROTO(stats_arenas_i_small_nrequests) +CTL_PROTO(stats_arenas_i_small_allocated) +CTL_PROTO(stats_arenas_i_small_nmalloc) +CTL_PROTO(stats_arenas_i_small_ndalloc) +CTL_PROTO(stats_arenas_i_small_nrequests) CTL_PROTO(stats_arenas_i_small_nfills) CTL_PROTO(stats_arenas_i_small_nflushes) -CTL_PROTO(stats_arenas_i_large_allocated) -CTL_PROTO(stats_arenas_i_large_nmalloc) -CTL_PROTO(stats_arenas_i_large_ndalloc) -CTL_PROTO(stats_arenas_i_large_nrequests) +CTL_PROTO(stats_arenas_i_large_allocated) +CTL_PROTO(stats_arenas_i_large_nmalloc) +CTL_PROTO(stats_arenas_i_large_ndalloc) +CTL_PROTO(stats_arenas_i_large_nrequests) CTL_PROTO(stats_arenas_i_large_nfills) CTL_PROTO(stats_arenas_i_large_nflushes) -CTL_PROTO(stats_arenas_i_bins_j_nmalloc) -CTL_PROTO(stats_arenas_i_bins_j_ndalloc) -CTL_PROTO(stats_arenas_i_bins_j_nrequests) +CTL_PROTO(stats_arenas_i_bins_j_nmalloc) +CTL_PROTO(stats_arenas_i_bins_j_ndalloc) +CTL_PROTO(stats_arenas_i_bins_j_nrequests) CTL_PROTO(stats_arenas_i_bins_j_curregs) -CTL_PROTO(stats_arenas_i_bins_j_nfills) -CTL_PROTO(stats_arenas_i_bins_j_nflushes) +CTL_PROTO(stats_arenas_i_bins_j_nfills) +CTL_PROTO(stats_arenas_i_bins_j_nflushes) CTL_PROTO(stats_arenas_i_bins_j_nslabs) CTL_PROTO(stats_arenas_i_bins_j_nreslabs) CTL_PROTO(stats_arenas_i_bins_j_curslabs) CTL_PROTO(stats_arenas_i_bins_j_nonfull_slabs) -INDEX_PROTO(stats_arenas_i_bins_j) +INDEX_PROTO(stats_arenas_i_bins_j) CTL_PROTO(stats_arenas_i_lextents_j_nmalloc) CTL_PROTO(stats_arenas_i_lextents_j_ndalloc) CTL_PROTO(stats_arenas_i_lextents_j_nrequests) @@ 
-188,15 +188,15 @@ CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes) CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes) CTL_PROTO(stats_arenas_i_extents_j_retained_bytes) INDEX_PROTO(stats_arenas_i_extents_j) -CTL_PROTO(stats_arenas_i_nthreads) +CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_uptime) -CTL_PROTO(stats_arenas_i_dss) +CTL_PROTO(stats_arenas_i_dss) CTL_PROTO(stats_arenas_i_dirty_decay_ms) CTL_PROTO(stats_arenas_i_muzzy_decay_ms) -CTL_PROTO(stats_arenas_i_pactive) -CTL_PROTO(stats_arenas_i_pdirty) +CTL_PROTO(stats_arenas_i_pactive) +CTL_PROTO(stats_arenas_i_pdirty) CTL_PROTO(stats_arenas_i_pmuzzy) -CTL_PROTO(stats_arenas_i_mapped) +CTL_PROTO(stats_arenas_i_mapped) CTL_PROTO(stats_arenas_i_retained) CTL_PROTO(stats_arenas_i_extent_avail) CTL_PROTO(stats_arenas_i_dirty_npurge) @@ -211,16 +211,16 @@ CTL_PROTO(stats_arenas_i_metadata_thp) CTL_PROTO(stats_arenas_i_tcache_bytes) CTL_PROTO(stats_arenas_i_resident) CTL_PROTO(stats_arenas_i_abandoned_vm) -INDEX_PROTO(stats_arenas_i) -CTL_PROTO(stats_allocated) -CTL_PROTO(stats_active) +INDEX_PROTO(stats_arenas_i) +CTL_PROTO(stats_allocated) +CTL_PROTO(stats_active) CTL_PROTO(stats_background_thread_num_threads) CTL_PROTO(stats_background_thread_num_runs) CTL_PROTO(stats_background_thread_run_interval) CTL_PROTO(stats_metadata) CTL_PROTO(stats_metadata_thp) CTL_PROTO(stats_resident) -CTL_PROTO(stats_mapped) +CTL_PROTO(stats_mapped) CTL_PROTO(stats_retained) CTL_PROTO(experimental_hooks_install) CTL_PROTO(experimental_hooks_remove) @@ -228,7 +228,7 @@ CTL_PROTO(experimental_utilization_query) CTL_PROTO(experimental_utilization_batch_query) CTL_PROTO(experimental_arenas_i_pactivep) INDEX_PROTO(experimental_arenas_i) - + #define MUTEX_STATS_CTL_PROTO_GEN(n) \ CTL_PROTO(stats_##n##_num_ops) \ CTL_PROTO(stats_##n##_num_wait) \ @@ -254,43 +254,43 @@ MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex) CTL_PROTO(stats_mutexes_reset) -/******************************************************************************/ -/* mallctl tree. */ - +/******************************************************************************/ +/* mallctl tree. */ + #define NAME(n) {true}, n #define CHILD(t, c) \ - sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ - (ctl_node_t *)c##_node, \ - NULL + sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ + (ctl_node_t *)c##_node, \ + NULL #define CTL(c) 0, NULL, c##_ctl - -/* - * Only handles internal indexed nodes, since there are currently no external - * ones. - */ + +/* + * Only handles internal indexed nodes, since there are currently no external + * ones. 
+ */ #define INDEX(i) {false}, i##_index - + static const ctl_named_node_t thread_tcache_node[] = { - {NAME("enabled"), CTL(thread_tcache_enabled)}, - {NAME("flush"), CTL(thread_tcache_flush)} -}; - + {NAME("enabled"), CTL(thread_tcache_enabled)}, + {NAME("flush"), CTL(thread_tcache_flush)} +}; + static const ctl_named_node_t thread_prof_node[] = { {NAME("name"), CTL(thread_prof_name)}, {NAME("active"), CTL(thread_prof_active)} }; -static const ctl_named_node_t thread_node[] = { - {NAME("arena"), CTL(thread_arena)}, - {NAME("allocated"), CTL(thread_allocated)}, - {NAME("allocatedp"), CTL(thread_allocatedp)}, - {NAME("deallocated"), CTL(thread_deallocated)}, - {NAME("deallocatedp"), CTL(thread_deallocatedp)}, +static const ctl_named_node_t thread_node[] = { + {NAME("arena"), CTL(thread_arena)}, + {NAME("allocated"), CTL(thread_allocated)}, + {NAME("allocatedp"), CTL(thread_allocatedp)}, + {NAME("deallocated"), CTL(thread_deallocated)}, + {NAME("deallocatedp"), CTL(thread_deallocatedp)}, {NAME("tcache"), CHILD(named, thread_tcache)}, {NAME("prof"), CHILD(named, thread_prof)} -}; - -static const ctl_named_node_t config_node[] = { +}; + +static const ctl_named_node_t config_node[] = { {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, {NAME("debug"), CTL(config_debug)}, {NAME("fill"), CTL(config_fill)}, @@ -303,9 +303,9 @@ static const ctl_named_node_t config_node[] = { {NAME("stats"), CTL(config_stats)}, {NAME("utrace"), CTL(config_utrace)}, {NAME("xmalloc"), CTL(config_xmalloc)} -}; - -static const ctl_named_node_t opt_node[] = { +}; + +static const ctl_named_node_t opt_node[] = { {NAME("abort"), CTL(opt_abort)}, {NAME("abort_conf"), CTL(opt_abort_conf)}, {NAME("confirm_conf"), CTL(opt_confirm_conf)}, @@ -339,15 +339,15 @@ static const ctl_named_node_t opt_node[] = { {NAME("prof_final"), CTL(opt_prof_final)}, {NAME("prof_leak"), CTL(opt_prof_leak)}, {NAME("prof_accum"), CTL(opt_prof_accum)} -}; - +}; + static const ctl_named_node_t tcache_node[] = { {NAME("create"), CTL(tcache_create)}, {NAME("flush"), CTL(tcache_flush)}, {NAME("destroy"), CTL(tcache_destroy)} }; -static const ctl_named_node_t arena_i_node[] = { +static const ctl_named_node_t arena_i_node[] = { {NAME("initialized"), CTL(arena_i_initialized)}, {NAME("decay"), CTL(arena_i_decay)}, {NAME("purge"), CTL(arena_i_purge)}, @@ -358,41 +358,41 @@ static const ctl_named_node_t arena_i_node[] = { {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)}, {NAME("extent_hooks"), CTL(arena_i_extent_hooks)}, {NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)} -}; -static const ctl_named_node_t super_arena_i_node[] = { +}; +static const ctl_named_node_t super_arena_i_node[] = { {NAME(""), CHILD(named, arena_i)} -}; - -static const ctl_indexed_node_t arena_node[] = { - {INDEX(arena_i)} -}; - -static const ctl_named_node_t arenas_bin_i_node[] = { +}; + +static const ctl_indexed_node_t arena_node[] = { + {INDEX(arena_i)} +}; + +static const ctl_named_node_t arenas_bin_i_node[] = { {NAME("size"), CTL(arenas_bin_i_size)}, {NAME("nregs"), CTL(arenas_bin_i_nregs)}, {NAME("slab_size"), CTL(arenas_bin_i_slab_size)}, {NAME("nshards"), CTL(arenas_bin_i_nshards)} -}; -static const ctl_named_node_t super_arenas_bin_i_node[] = { +}; +static const ctl_named_node_t super_arenas_bin_i_node[] = { {NAME(""), CHILD(named, arenas_bin_i)} -}; - -static const ctl_indexed_node_t arenas_bin_node[] = { - {INDEX(arenas_bin_i)} -}; - +}; + +static const ctl_indexed_node_t arenas_bin_node[] = { + {INDEX(arenas_bin_i)} +}; + static const ctl_named_node_t 
arenas_lextent_i_node[] = { {NAME("size"), CTL(arenas_lextent_i_size)} -}; +}; static const ctl_named_node_t super_arenas_lextent_i_node[] = { {NAME(""), CHILD(named, arenas_lextent_i)} -}; - +}; + static const ctl_indexed_node_t arenas_lextent_node[] = { {INDEX(arenas_lextent_i)} -}; - -static const ctl_named_node_t arenas_node[] = { +}; + +static const ctl_named_node_t arenas_node[] = { {NAME("narenas"), CTL(arenas_narenas)}, {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)}, {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)}, @@ -406,19 +406,19 @@ static const ctl_named_node_t arenas_node[] = { {NAME("lextent"), CHILD(indexed, arenas_lextent)}, {NAME("create"), CTL(arenas_create)}, {NAME("lookup"), CTL(arenas_lookup)} -}; - -static const ctl_named_node_t prof_node[] = { +}; + +static const ctl_named_node_t prof_node[] = { {NAME("thread_active_init"), CTL(prof_thread_active_init)}, - {NAME("active"), CTL(prof_active)}, - {NAME("dump"), CTL(prof_dump)}, + {NAME("active"), CTL(prof_active)}, + {NAME("dump"), CTL(prof_dump)}, {NAME("gdump"), CTL(prof_gdump)}, {NAME("reset"), CTL(prof_reset)}, {NAME("interval"), CTL(prof_interval)}, {NAME("lg_sample"), CTL(lg_prof_sample)}, {NAME("log_start"), CTL(prof_log_start)}, {NAME("log_stop"), CTL(prof_log_stop)} -}; +}; static const ctl_named_node_t stats_arenas_i_small_node[] = { {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, @@ -426,8 +426,8 @@ static const ctl_named_node_t stats_arenas_i_small_node[] = { {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}, {NAME("nfills"), CTL(stats_arenas_i_small_nfills)}, {NAME("nflushes"), CTL(stats_arenas_i_small_nflushes)} -}; - +}; + static const ctl_named_node_t stats_arenas_i_large_node[] = { {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, @@ -435,8 +435,8 @@ static const ctl_named_node_t stats_arenas_i_large_node[] = { {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}, {NAME("nfills"), CTL(stats_arenas_i_large_nfills)}, {NAME("nflushes"), CTL(stats_arenas_i_large_nflushes)} -}; - +}; + #define MUTEX_PROF_DATA_NODE(prefix) \ static const ctl_named_node_t stats_##prefix##_node[] = { \ {NAME("num_ops"), \ @@ -454,11 +454,11 @@ static const ctl_named_node_t stats_##prefix##_node[] = { \ {NAME("max_num_thds"), \ CTL(stats_##prefix##_max_num_thds)} \ /* Note that # of current waiting thread not provided. 
*/ \ -}; - +}; + MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex) - -static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { + +static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, @@ -470,30 +470,30 @@ static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)}, {NAME("nonfull_slabs"), CTL(stats_arenas_i_bins_j_nonfull_slabs)}, {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)} -}; +}; -static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { +static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { {NAME(""), CHILD(named, stats_arenas_i_bins_j)} -}; - -static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { - {INDEX(stats_arenas_i_bins_j)} -}; - +}; + +static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { + {INDEX(stats_arenas_i_bins_j)} +}; + static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = { {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)}, {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)} -}; +}; static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = { {NAME(""), CHILD(named, stats_arenas_i_lextents_j)} -}; - +}; + static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = { {INDEX(stats_arenas_i_lextents_j)} -}; - +}; + static const ctl_named_node_t stats_arenas_i_extents_j_node[] = { {NAME("ndirty"), CTL(stats_arenas_i_extents_j_ndirty)}, {NAME("nmuzzy"), CTL(stats_arenas_i_extents_j_nmuzzy)}, @@ -521,7 +521,7 @@ MUTEX_PROF_ARENA_MUTEXES #undef OP }; -static const ctl_named_node_t stats_arenas_i_node[] = { +static const ctl_named_node_t stats_arenas_i_node[] = { {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, {NAME("uptime"), CTL(stats_arenas_i_uptime)}, {NAME("dss"), CTL(stats_arenas_i_dss)}, @@ -551,15 +551,15 @@ static const ctl_named_node_t stats_arenas_i_node[] = { {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}, {NAME("extents"), CHILD(indexed, stats_arenas_i_extents)}, {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)} -}; -static const ctl_named_node_t super_stats_arenas_i_node[] = { +}; +static const ctl_named_node_t super_stats_arenas_i_node[] = { {NAME(""), CHILD(named, stats_arenas_i)} -}; - -static const ctl_indexed_node_t stats_arenas_node[] = { - {INDEX(stats_arenas_i)} -}; - +}; + +static const ctl_indexed_node_t stats_arenas_node[] = { + {INDEX(stats_arenas_i)} +}; + static const ctl_named_node_t stats_background_thread_node[] = { {NAME("num_threads"), CTL(stats_background_thread_num_threads)}, {NAME("num_runs"), CTL(stats_background_thread_num_runs)}, @@ -578,7 +578,7 @@ MUTEX_PROF_GLOBAL_MUTEXES }; #undef MUTEX_PROF_DATA_NODE -static const ctl_named_node_t stats_node[] = { +static const ctl_named_node_t stats_node[] = { {NAME("allocated"), CTL(stats_allocated)}, {NAME("active"), CTL(stats_active)}, {NAME("metadata"), CTL(stats_metadata)}, @@ -590,8 +590,8 @@ static const ctl_named_node_t stats_node[] = { CHILD(named, stats_background_thread)}, {NAME("mutexes"), CHILD(named, stats_mutexes)}, {NAME("arenas"), CHILD(indexed, stats_arenas)} -}; - +}; + static const ctl_named_node_t experimental_hooks_node[] = { {NAME("install"), CTL(experimental_hooks_install)}, {NAME("remove"), 
CTL(experimental_hooks_remove)} @@ -619,32 +619,32 @@ static const ctl_named_node_t experimental_node[] = { {NAME("arenas"), CHILD(indexed, experimental_arenas)} }; -static const ctl_named_node_t root_node[] = { - {NAME("version"), CTL(version)}, - {NAME("epoch"), CTL(epoch)}, +static const ctl_named_node_t root_node[] = { + {NAME("version"), CTL(version)}, + {NAME("epoch"), CTL(epoch)}, {NAME("background_thread"), CTL(background_thread)}, {NAME("max_background_threads"), CTL(max_background_threads)}, - {NAME("thread"), CHILD(named, thread)}, - {NAME("config"), CHILD(named, config)}, - {NAME("opt"), CHILD(named, opt)}, + {NAME("thread"), CHILD(named, thread)}, + {NAME("config"), CHILD(named, config)}, + {NAME("opt"), CHILD(named, opt)}, {NAME("tcache"), CHILD(named, tcache)}, - {NAME("arena"), CHILD(indexed, arena)}, - {NAME("arenas"), CHILD(named, arenas)}, - {NAME("prof"), CHILD(named, prof)}, + {NAME("arena"), CHILD(indexed, arena)}, + {NAME("arenas"), CHILD(named, arenas)}, + {NAME("prof"), CHILD(named, prof)}, {NAME("stats"), CHILD(named, stats)}, {NAME("experimental"), CHILD(named, experimental)} -}; -static const ctl_named_node_t super_root_node[] = { - {NAME(""), CHILD(named, root)} -}; - -#undef NAME -#undef CHILD -#undef CTL -#undef INDEX - -/******************************************************************************/ - +}; +static const ctl_named_node_t super_root_node[] = { + {NAME(""), CHILD(named, root)} +}; + +#undef NAME +#undef CHILD +#undef CTL +#undef INDEX + +/******************************************************************************/ + /* * Sets *dst + *src non-atomically. This is safe, since everything is * synchronized by the ctl mutex. @@ -659,7 +659,7 @@ ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) { *dst += *src; #endif } - + /* Likewise: with ctl mutex synchronization, reading is simple. 
*/ static uint64_t ctl_arena_stats_read_u64(arena_stats_u64_t *p) { @@ -712,11 +712,11 @@ arenas_i2a_impl(size_t i, bool compat, bool validate) { a = (unsigned)i + 2; } break; - } - + } + return a; -} - +} + static unsigned arenas_i2a(size_t i) { return arenas_i2a_impl(i, true, false); @@ -765,7 +765,7 @@ arenas_i(size_t i) { return ret; } -static void +static void ctl_arena_clear(ctl_arena_t *ctl_arena) { ctl_arena->nthreads = 0; ctl_arena->dss = dss_prec_names[dss_prec_limit]; @@ -774,7 +774,7 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) { ctl_arena->pactive = 0; ctl_arena->pdirty = 0; ctl_arena->pmuzzy = 0; - if (config_stats) { + if (config_stats) { memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t)); ctl_arena->astats->allocated_small = 0; ctl_arena->astats->nmalloc_small = 0; @@ -788,13 +788,13 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) { sizeof(arena_stats_large_t)); memset(ctl_arena->astats->estats, 0, SC_NPSIZES * sizeof(arena_stats_extents_t)); - } -} - -static void + } +} + +static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { - unsigned i; - + unsigned i; + if (config_stats) { arena_stats_merge(tsdn, arena, &ctl_arena->nthreads, &ctl_arena->dss, &ctl_arena->dirty_decay_ms, @@ -802,7 +802,7 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { &ctl_arena->pdirty, &ctl_arena->pmuzzy, &ctl_arena->astats->astats, ctl_arena->astats->bstats, ctl_arena->astats->lstats, ctl_arena->astats->estats); - + for (i = 0; i < SC_NBINS; i++) { ctl_arena->astats->allocated_small += ctl_arena->astats->bstats[i].curregs * @@ -823,14 +823,14 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { &ctl_arena->dss, &ctl_arena->dirty_decay_ms, &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, &ctl_arena->pdirty, &ctl_arena->pmuzzy); - } -} - -static void + } +} + +static void ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, bool destroyed) { - unsigned i; - + unsigned i; + if (!destroyed) { ctl_sdarena->nthreads += ctl_arena->nthreads; ctl_sdarena->pactive += ctl_arena->pactive; @@ -842,11 +842,11 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, assert(ctl_arena->pdirty == 0); assert(ctl_arena->pmuzzy == 0); } - + if (config_stats) { ctl_arena_stats_t *sdstats = ctl_sdarena->astats; ctl_arena_stats_t *astats = ctl_arena->astats; - + if (!destroyed) { accum_atomic_zu(&sdstats->astats.mapped, &astats->astats.mapped); @@ -855,21 +855,21 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, accum_atomic_zu(&sdstats->astats.extent_avail, &astats->astats.extent_avail); } - + ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge, &astats->astats.decay_dirty.npurge); ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise, &astats->astats.decay_dirty.nmadvise); ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged, &astats->astats.decay_dirty.purged); - + ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge, &astats->astats.decay_muzzy.npurge); ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise, &astats->astats.decay_muzzy.nmadvise); ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged, &astats->astats.decay_muzzy.purged); - + #define OP(mtx) malloc_mutex_prof_merge( \ &(sdstats->astats.mutex_prof_data[ \ arena_prof_mutex_##mtx]), \ @@ -939,7 +939,7 @@ MUTEX_PROF_ARENA_MUTEXES } sdstats->bstats[i].nfills += astats->bstats[i].nfills; sdstats->bstats[i].nflushes += - 
astats->bstats[i].nflushes; + astats->bstats[i].nflushes; sdstats->bstats[i].nslabs += astats->bstats[i].nslabs; sdstats->bstats[i].reslabs += astats->bstats[i].reslabs; if (!destroyed) { @@ -953,7 +953,7 @@ MUTEX_PROF_ARENA_MUTEXES } malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data, &astats->bstats[i].mutex_data); - } + } /* Merge stats for large allocations. */ for (i = 0; i < SC_NSIZES - SC_NBINS; i++) { @@ -986,20 +986,20 @@ MUTEX_PROF_ARENA_MUTEXES accum_atomic_zu(&sdstats->estats[i].retained_bytes, &astats->estats[i].retained_bytes); } - } -} - -static void + } +} + +static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena, unsigned i, bool destroyed) { ctl_arena_t *ctl_arena = arenas_i(i); - + ctl_arena_clear(ctl_arena); ctl_arena_stats_amerge(tsdn, ctl_arena, arena); /* Merge into sum stats as well. */ ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed); } - + static unsigned ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) { unsigned arena_ind; @@ -1009,28 +1009,28 @@ ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) { NULL) { ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link); arena_ind = ctl_arena->arena_ind; - } else { + } else { arena_ind = ctl_arenas->narenas; - } - + } + /* Trigger stats allocation. */ if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) { return UINT_MAX; } - + /* Initialize new arena. */ if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) { return UINT_MAX; - } - + } + if (arena_ind == ctl_arenas->narenas) { ctl_arenas->narenas++; - } - + } + return arena_ind; -} - -static void +} + +static void ctl_background_thread_stats_read(tsdn_t *tsdn) { background_thread_stats_t *stats = &ctl_stats->background_thread; if (!have_background_thread || @@ -1042,32 +1042,32 @@ ctl_background_thread_stats_read(tsdn_t *tsdn) { static void ctl_refresh(tsdn_t *tsdn) { - unsigned i; + unsigned i; ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL); VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas); - - /* - * Clear sum stats, since they will be merged into by - * ctl_arena_refresh(). - */ + + /* + * Clear sum stats, since they will be merged into by + * ctl_arena_refresh(). + */ ctl_arena_clear(ctl_sarena); - + for (i = 0; i < ctl_arenas->narenas; i++) { tarenas[i] = arena_get(tsdn, i, false); - } + } for (i = 0; i < ctl_arenas->narenas; i++) { ctl_arena_t *ctl_arena = arenas_i(i); - bool initialized = (tarenas[i] != NULL); - + bool initialized = (tarenas[i] != NULL); + ctl_arena->initialized = initialized; if (initialized) { ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i, false); } - } - - if (config_stats) { + } + + if (config_stats) { ctl_stats->allocated = ctl_sarena->astats->allocated_small + atomic_load_zu(&ctl_sarena->astats->astats.allocated_large, ATOMIC_RELAXED); @@ -1110,24 +1110,24 @@ ctl_refresh(tsdn_t *tsdn) { &ctl_stats->mutex_prof_data[global_prof_mutex_ctl], &ctl_mtx); #undef READ_GLOBAL_MUTEX_PROF_DATA - } + } ctl_arenas->epoch++; -} - -static bool +} + +static bool ctl_init(tsd_t *tsd) { - bool ret; + bool ret; tsdn_t *tsdn = tsd_tsdn(tsd); - + malloc_mutex_lock(tsdn, &ctl_mtx); if (!ctl_initialized) { ctl_arena_t *ctl_sarena, *ctl_darena; unsigned i; - /* + /* * Allocate demand-zeroed space for pointers to the full * range of supported arena indices. 
- */ + */ if (ctl_arenas == NULL) { ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn, b0get(), sizeof(ctl_arenas_t), QUANTUM); @@ -1153,280 +1153,280 @@ ctl_init(tsd_t *tsd) { */ if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false, true)) == NULL) { - ret = true; - goto label_return; - } + ret = true; + goto label_return; + } ctl_sarena->initialized = true; - + if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED, false, true)) == NULL) { ret = true; goto label_return; } ctl_arena_clear(ctl_darena); - /* + /* * Don't toggle ctl_darena to initialized until an arena is * actually destroyed, so that arena.<i>.initialized can be used * to query whether the stats are relevant. - */ + */ ctl_arenas->narenas = narenas_total_get(); for (i = 0; i < ctl_arenas->narenas; i++) { if (arenas_i_impl(tsd, i, false, true) == NULL) { ret = true; goto label_return; - } - } - + } + } + ql_new(&ctl_arenas->destroyed); ctl_refresh(tsdn); - ctl_initialized = true; - } - - ret = false; -label_return: + ctl_initialized = true; + } + + ret = false; +label_return: malloc_mutex_unlock(tsdn, &ctl_mtx); return ret; -} - -static int +} + +static int ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, size_t *mibp, size_t *depthp) { - int ret; - const char *elm, *tdot, *dot; - size_t elen, i, j; - const ctl_named_node_t *node; - - elm = name; - /* Equivalent to strchrnul(). */ - dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); - elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); - if (elen == 0) { - ret = ENOENT; - goto label_return; - } - node = super_root_node; - for (i = 0; i < *depthp; i++) { - assert(node); - assert(node->nchildren > 0); - if (ctl_named_node(node->children) != NULL) { - const ctl_named_node_t *pnode = node; - - /* Children are named. */ - for (j = 0; j < node->nchildren; j++) { - const ctl_named_node_t *child = - ctl_named_children(node, j); - if (strlen(child->name) == elen && - strncmp(elm, child->name, elen) == 0) { - node = child; + int ret; + const char *elm, *tdot, *dot; + size_t elen, i, j; + const ctl_named_node_t *node; + + elm = name; + /* Equivalent to strchrnul(). */ + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + if (elen == 0) { + ret = ENOENT; + goto label_return; + } + node = super_root_node; + for (i = 0; i < *depthp; i++) { + assert(node); + assert(node->nchildren > 0); + if (ctl_named_node(node->children) != NULL) { + const ctl_named_node_t *pnode = node; + + /* Children are named. */ + for (j = 0; j < node->nchildren; j++) { + const ctl_named_node_t *child = + ctl_named_children(node, j); + if (strlen(child->name) == elen && + strncmp(elm, child->name, elen) == 0) { + node = child; if (nodesp != NULL) { - nodesp[i] = - (const ctl_node_t *)node; + nodesp[i] = + (const ctl_node_t *)node; } - mibp[i] = j; - break; - } - } - if (node == pnode) { - ret = ENOENT; - goto label_return; - } - } else { - uintmax_t index; - const ctl_indexed_node_t *inode; - - /* Children are indexed. */ - index = malloc_strtoumax(elm, NULL, 10); - if (index == UINTMAX_MAX || index > SIZE_T_MAX) { - ret = ENOENT; - goto label_return; - } - - inode = ctl_indexed_node(node->children); + mibp[i] = j; + break; + } + } + if (node == pnode) { + ret = ENOENT; + goto label_return; + } + } else { + uintmax_t index; + const ctl_indexed_node_t *inode; + + /* Children are indexed. 
*/ + index = malloc_strtoumax(elm, NULL, 10); + if (index == UINTMAX_MAX || index > SIZE_T_MAX) { + ret = ENOENT; + goto label_return; + } + + inode = ctl_indexed_node(node->children); node = inode->index(tsdn, mibp, *depthp, (size_t)index); - if (node == NULL) { - ret = ENOENT; - goto label_return; - } - + if (node == NULL) { + ret = ENOENT; + goto label_return; + } + if (nodesp != NULL) { - nodesp[i] = (const ctl_node_t *)node; + nodesp[i] = (const ctl_node_t *)node; } - mibp[i] = (size_t)index; - } - - if (node->ctl != NULL) { - /* Terminal node. */ - if (*dot != '\0') { - /* - * The name contains more elements than are - * in this path through the tree. - */ - ret = ENOENT; - goto label_return; - } - /* Complete lookup successful. */ - *depthp = i + 1; - break; - } - - /* Update elm. */ - if (*dot == '\0') { - /* No more elements. */ - ret = ENOENT; - goto label_return; - } - elm = &dot[1]; - dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : - strchr(elm, '\0'); - elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); - } - - ret = 0; -label_return: + mibp[i] = (size_t)index; + } + + if (node->ctl != NULL) { + /* Terminal node. */ + if (*dot != '\0') { + /* + * The name contains more elements than are + * in this path through the tree. + */ + ret = ENOENT; + goto label_return; + } + /* Complete lookup successful. */ + *depthp = i + 1; + break; + } + + /* Update elm. */ + if (*dot == '\0') { + /* No more elements. */ + ret = ENOENT; + goto label_return; + } + elm = &dot[1]; + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : + strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + } + + ret = 0; +label_return: return ret; -} - -int +} + +int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; - size_t depth; - ctl_node_t const *nodes[CTL_MAX_DEPTH]; - size_t mib[CTL_MAX_DEPTH]; - const ctl_named_node_t *node; - + int ret; + size_t depth; + ctl_node_t const *nodes[CTL_MAX_DEPTH]; + size_t mib[CTL_MAX_DEPTH]; + const ctl_named_node_t *node; + if (!ctl_initialized && ctl_init(tsd)) { - ret = EAGAIN; - goto label_return; - } - - depth = CTL_MAX_DEPTH; + ret = EAGAIN; + goto label_return; + } + + depth = CTL_MAX_DEPTH; ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); if (ret != 0) { - goto label_return; + goto label_return; } - - node = ctl_named_node(nodes[depth-1]); + + node = ctl_named_node(nodes[depth-1]); if (node != NULL && node->ctl) { ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); } else { - /* The name refers to a partial path through the ctl tree. */ - ret = ENOENT; - } - -label_return: - return(ret); -} - -int + /* The name refers to a partial path through the ctl tree. */ + ret = ENOENT; + } + +label_return: + return(ret); +} + +int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) { - int ret; - + int ret; + if (!ctl_initialized && ctl_init(tsd)) { - ret = EAGAIN; - goto label_return; - } - + ret = EAGAIN; + goto label_return; + } + ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp); -label_return: - return(ret); -} - -int +label_return: + return(ret); +} + +int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; - const ctl_named_node_t *node; - size_t i; - + int ret; + const ctl_named_node_t *node; + size_t i; + if (!ctl_initialized && ctl_init(tsd)) { - ret = EAGAIN; - goto label_return; - } - - /* Iterate down the tree. 
*/ - node = super_root_node; - for (i = 0; i < miblen; i++) { - assert(node); - assert(node->nchildren > 0); - if (ctl_named_node(node->children) != NULL) { - /* Children are named. */ - if (node->nchildren <= mib[i]) { - ret = ENOENT; - goto label_return; - } - node = ctl_named_children(node, mib[i]); - } else { - const ctl_indexed_node_t *inode; - - /* Indexed element. */ - inode = ctl_indexed_node(node->children); + ret = EAGAIN; + goto label_return; + } + + /* Iterate down the tree. */ + node = super_root_node; + for (i = 0; i < miblen; i++) { + assert(node); + assert(node->nchildren > 0); + if (ctl_named_node(node->children) != NULL) { + /* Children are named. */ + if (node->nchildren <= mib[i]) { + ret = ENOENT; + goto label_return; + } + node = ctl_named_children(node, mib[i]); + } else { + const ctl_indexed_node_t *inode; + + /* Indexed element. */ + inode = ctl_indexed_node(node->children); node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); - if (node == NULL) { - ret = ENOENT; - goto label_return; - } - } - } - - /* Call the ctl function. */ + if (node == NULL) { + ret = ENOENT; + goto label_return; + } + } + } + + /* Call the ctl function. */ if (node && node->ctl) { ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); } else { - /* Partial MIB. */ - ret = ENOENT; - } - -label_return: - return(ret); -} - -bool + /* Partial MIB. */ + ret = ENOENT; + } + +label_return: + return(ret); +} + +bool ctl_boot(void) { if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL, malloc_mutex_rank_exclusive)) { return true; } - - ctl_initialized = false; - + + ctl_initialized = false; + return false; -} - -void +} + +void ctl_prefork(tsdn_t *tsdn) { malloc_mutex_prefork(tsdn, &ctl_mtx); -} - -void +} + +void ctl_postfork_parent(tsdn_t *tsdn) { malloc_mutex_postfork_parent(tsdn, &ctl_mtx); -} - -void +} + +void ctl_postfork_child(tsdn_t *tsdn) { malloc_mutex_postfork_child(tsdn, &ctl_mtx); -} - -/******************************************************************************/ -/* *_ctl() functions. */ - +} + +/******************************************************************************/ +/* *_ctl() functions. */ + #define READONLY() do { \ - if (newp != NULL || newlen != 0) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) - + if (newp != NULL || newlen != 0) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + #define WRITEONLY() do { \ - if (oldp != NULL || oldlenp != NULL) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) - + if (oldp != NULL || oldlenp != NULL) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + #define READ_XOR_WRITE() do { \ if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ newlen != 0)) { \ @@ -1436,28 +1436,28 @@ ctl_postfork_child(tsdn_t *tsdn) { } while (0) #define READ(v, t) do { \ - if (oldp != NULL && oldlenp != NULL) { \ - if (*oldlenp != sizeof(t)) { \ - size_t copylen = (sizeof(t) <= *oldlenp) \ - ? sizeof(t) : *oldlenp; \ - memcpy(oldp, (void *)&(v), copylen); \ - ret = EINVAL; \ - goto label_return; \ + if (oldp != NULL && oldlenp != NULL) { \ + if (*oldlenp != sizeof(t)) { \ + size_t copylen = (sizeof(t) <= *oldlenp) \ + ? 
sizeof(t) : *oldlenp; \ + memcpy(oldp, (void *)&(v), copylen); \ + ret = EINVAL; \ + goto label_return; \ } \ *(t *)oldp = (v); \ - } \ -} while (0) - + } \ +} while (0) + #define WRITE(v, t) do { \ - if (newp != NULL) { \ - if (newlen != sizeof(t)) { \ - ret = EINVAL; \ - goto label_return; \ - } \ - (v) = *(t *)newp; \ - } \ -} while (0) - + if (newp != NULL) { \ + if (newlen != sizeof(t)) { \ + ret = EINVAL; \ + goto label_return; \ + } \ + (v) = *(t *)newp; \ + } \ +} while (0) + #define MIB_UNSIGNED(v, i) do { \ if (mib[i] > UINT_MAX) { \ ret = EFAULT; \ @@ -1466,124 +1466,124 @@ ctl_postfork_child(tsdn_t *tsdn) { v = (unsigned)mib[i]; \ } while (0) -/* - * There's a lot of code duplication in the following macros due to limitations - * in how nested cpp macros are expanded. - */ +/* + * There's a lot of code duplication in the following macros due to limitations + * in how nested cpp macros are expanded. + */ #define CTL_RO_CLGEN(c, l, n, v, t) \ -static int \ +static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen) { \ - int ret; \ - t oldval; \ - \ + int ret; \ + t oldval; \ + \ if (!(c)) { \ return ENOENT; \ } \ if (l) { \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ } \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ if (l) { \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ } \ return ret; \ -} - +} + #define CTL_RO_CGEN(c, n, v, t) \ -static int \ +static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ - int ret; \ - t oldval; \ - \ + int ret; \ + t oldval; \ + \ if (!(c)) { \ return ENOENT; \ } \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ return ret; \ -} - +} + #define CTL_RO_GEN(n, v, t) \ -static int \ +static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen) { \ - int ret; \ - t oldval; \ - \ + int ret; \ + t oldval; \ + \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ return ret; \ -} - -/* - * ctl_mtx is not acquired, under the assumption that no pertinent data will - * mutate during the call. - */ +} + +/* + * ctl_mtx is not acquired, under the assumption that no pertinent data will + * mutate during the call. 
+ */ #define CTL_RO_NL_CGEN(c, n, v, t) \ -static int \ +static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ - int ret; \ - t oldval; \ - \ + int ret; \ + t oldval; \ + \ if (!(c)) { \ return ENOENT; \ } \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ return ret; \ -} - +} + #define CTL_RO_NL_GEN(n, v, t) \ -static int \ +static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ - int ret; \ - t oldval; \ - \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ + int ret; \ + t oldval; \ + \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ return ret; \ -} - +} + #define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ -static int \ +static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen) { \ - int ret; \ + int ret; \ t oldval; \ - \ + \ if (!(c)) { \ return ENOENT; \ } \ - READONLY(); \ + READONLY(); \ oldval = (m(tsd)); \ READ(oldval, t); \ \ @@ -1600,37 +1600,37 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ t oldval; \ \ READONLY(); \ - oldval = n; \ + oldval = n; \ READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ + \ + ret = 0; \ +label_return: \ return ret; \ -} - -/******************************************************************************/ - -CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) - -static int +} + +/******************************************************************************/ + +CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) + +static int epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; - UNUSED uint64_t newval; - + int ret; + UNUSED uint64_t newval; + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - WRITE(newval, uint64_t); + WRITE(newval, uint64_t); if (newp != NULL) { ctl_refresh(tsd_tsdn(tsd)); } READ(ctl_arenas->epoch, uint64_t); - - ret = 0; -label_return: + + ret = 0; +label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; -} - +} + static int background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, @@ -1742,8 +1742,8 @@ label_return: return ret; } -/******************************************************************************/ - +/******************************************************************************/ + CTL_RO_CONFIG_GEN(config_cache_oblivious, bool) CTL_RO_CONFIG_GEN(config_debug, bool) CTL_RO_CONFIG_GEN(config_fill, bool) @@ -1756,16 +1756,16 @@ CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) CTL_RO_CONFIG_GEN(config_stats, bool) CTL_RO_CONFIG_GEN(config_utrace, bool) CTL_RO_CONFIG_GEN(config_xmalloc, bool) - -/******************************************************************************/ - -CTL_RO_NL_GEN(opt_abort, opt_abort, bool) + +/******************************************************************************/ + +CTL_RO_NL_GEN(opt_abort, opt_abort, bool) CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool) CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool) CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp], const char *) CTL_RO_NL_GEN(opt_retain, opt_retain, bool) -CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) +CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) 
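The CTL_RO_NL_GEN/CTL_RO_NL_CGEN invocations around this point stamp out read-only, no-lock handlers for the opt.* namespace (opt.narenas itself follows immediately below). As a caller-side sketch, not part of this diff and assuming only jemalloc's public <jemalloc/jemalloc.h> entry points, such a handler is reached like this:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

static void
print_opt_narenas(void) {
	unsigned narenas;
	size_t sz = sizeof(narenas);

	/* READONLY() handlers reject any new value, so newp/newlen stay 0. */
	if (mallctl("opt.narenas", &narenas, &sz, NULL, 0) == 0) {
		printf("opt.narenas: %u\n", narenas);
	}
}

If sz had been initialized to the wrong size, the READ() macro shown earlier would copy what fits and fail with EINVAL, which is why it starts as sizeof(narenas).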
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena], const char *) @@ -1774,55 +1774,55 @@ CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool) CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t) CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t) CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t) -CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) +CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *) CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) -CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) -CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) -CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) +CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) +CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) +CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool) CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *) CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit, size_t) CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) -CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) +CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init, opt_prof_thread_active_init, bool) -CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) -CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) -CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) -CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) - -/******************************************************************************/ - -static int +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) + +/******************************************************************************/ + +static int thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; arena_t *oldarena; - unsigned newind, oldind; - + unsigned newind, oldind; + oldarena = arena_choose(tsd, NULL); if (oldarena == NULL) { return EAGAIN; } newind = oldind = arena_ind_get(oldarena); - WRITE(newind, unsigned); - READ(oldind, unsigned); + WRITE(newind, unsigned); + READ(oldind, unsigned); - if (newind != oldind) { + if (newind != oldind) { arena_t *newarena; - + if (newind >= narenas_total_get()) { - /* New arena index is out of range. */ - ret = EFAULT; - goto label_return; - } - + /* New arena index is out of range. 
*/ + ret = EFAULT; + goto label_return; + } + if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { if (newind < percpu_arena_ind_limit(opt_percpu_arena)) { @@ -1836,12 +1836,12 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, } } - /* Initialize arena if necessary. */ + /* Initialize arena if necessary. */ newarena = arena_get(tsd_tsdn(tsd), newind, true); if (newarena == NULL) { - ret = EAGAIN; - goto label_return; - } + ret = EAGAIN; + goto label_return; + } /* Set new arena/tcache associations. */ arena_migrate(tsd, oldind, newind); if (tcache_available(tsd)) { @@ -1849,7 +1849,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, tsd_tcachep_get(tsd), newarena); } } - + ret = 0; label_return: return ret; @@ -1876,22 +1876,22 @@ thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; - } + } tcache_enabled_set(tsd, *(bool *)newp); - } + } READ(oldval, bool); - - ret = 0; -label_return: + + ret = 0; +label_return: return ret; -} - +} + static int thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - + if (!tcache_available(tsd)) { ret = EFAULT; goto label_return; @@ -1907,11 +1907,11 @@ label_return: return ret; } -static int +static int thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; if (!config_prof) { return ENOENT; @@ -1944,45 +1944,45 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - bool oldval; - + bool oldval; + if (!config_prof) { return ENOENT; } - + oldval = prof_thread_active_get(tsd); - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } if (prof_thread_active_set(tsd, *(bool *)newp)) { ret = EAGAIN; goto label_return; } - } - READ(oldval, bool); - - ret = 0; -label_return: + } + READ(oldval, bool); + + ret = 0; +label_return: return ret; -} - +} + /******************************************************************************/ -static int +static int tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; unsigned tcache_ind; - + READONLY(); if (tcaches_create(tsd, &tcache_ind)) { ret = EFAULT; goto label_return; } READ(tcache_ind, unsigned); - + ret = 0; label_return: return ret; @@ -1994,7 +1994,7 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, int ret; unsigned tcache_ind; - WRITEONLY(); + WRITEONLY(); tcache_ind = UINT_MAX; WRITE(tcache_ind, unsigned); if (tcache_ind == UINT_MAX) { @@ -2002,12 +2002,12 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, goto label_return; } tcaches_flush(tsd, tcache_ind); - + ret = 0; label_return: return ret; } - + static int tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { @@ -2023,13 +2023,13 @@ tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, } tcaches_destroy(tsd, tcache_ind); - ret = 0; -label_return: + ret = 0; +label_return: return ret; -} - -/******************************************************************************/ - +} + +/******************************************************************************/ + 
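The three tcache handlers just above form a create/use/destroy lifecycle driven entirely through mallctl(). A minimal round-trip sketch, again assuming only the public jemalloc API rather than anything introduced by this diff:

#include <jemalloc/jemalloc.h>

static void
tcache_round_trip(void) {
	unsigned tcache_ind;
	size_t sz = sizeof(tcache_ind);

	/* tcache.create is read-only; it hands back the new cache's index. */
	if (mallctl("tcache.create", &tcache_ind, &sz, NULL, 0) != 0) {
		return;
	}
	/* Route an allocation through the explicit cache... */
	void *p = mallocx(64, MALLOCX_TCACHE(tcache_ind));
	if (p != NULL) {
		dallocx(p, MALLOCX_TCACHE(tcache_ind));
	}
	/* ...then give the index back; tcache.destroy is write-only. */
	mallctl("tcache.destroy", NULL, NULL, &tcache_ind, sizeof(tcache_ind));
}

The READONLY()/WRITEONLY() guards in the handlers are what enforce that split: passing an old-value buffer to tcache.destroy, for example, fails with EPERM.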
static int arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { @@ -2052,12 +2052,12 @@ label_return: return ret; } -static void +static void arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) { malloc_mutex_lock(tsdn, &ctl_mtx); { unsigned narenas = ctl_arenas->narenas; - + /* * Access via index narenas is deprecated, and scheduled for * removal in 6.0.0. @@ -2065,7 +2065,7 @@ arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) { if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) { unsigned i; VARIABLE_ARRAY(arena_t *, tarenas, narenas); - + for (i = 0; i < narenas; i++) { tarenas[i] = arena_get(tsdn, i, false); } @@ -2095,27 +2095,27 @@ arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) { if (tarena != NULL) { arena_decay(tsdn, tarena, false, all); } - } - } -} - -static int + } + } +} + +static int arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; unsigned arena_ind; - - READONLY(); - WRITEONLY(); + + READONLY(); + WRITEONLY(); MIB_UNSIGNED(arena_ind, 1); arena_i_decay(tsd_tsdn(tsd), arena_ind, false); - - ret = 0; -label_return: + + ret = 0; +label_return: return ret; -} - -static int +} + +static int arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; @@ -2250,11 +2250,11 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, int ret; const char *dss = NULL; unsigned arena_ind; - dss_prec_t dss_prec_old = dss_prec_limit; - dss_prec_t dss_prec = dss_prec_limit; - + dss_prec_t dss_prec_old = dss_prec_limit; + dss_prec_t dss_prec = dss_prec_limit; + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - WRITE(dss, const char *); + WRITE(dss, const char *); MIB_UNSIGNED(arena_ind, 1); if (dss != NULL) { int i; @@ -2266,14 +2266,14 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, match = true; break; } - } + } if (!match) { ret = EINVAL; goto label_return; } - } - + } + /* * Access via index narenas is deprecated, and scheduled for removal in * 6.0.0. @@ -2286,7 +2286,7 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, goto label_return; } dss_prec_old = extent_dss_prec_get(); - } else { + } else { arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL || (dss_prec != dss_prec_limit && arena_dss_prec_set(arena, dss_prec))) { @@ -2294,10 +2294,10 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, goto label_return; } dss_prec_old = arena_dss_prec_get(arena); - } + } - dss = dss_prec_names[dss_prec_old]; - READ(dss, const char *); + dss = dss_prec_names[dss_prec_old]; + READ(dss, const char *); ret = 0; label_return: @@ -2315,10 +2315,10 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, MIB_UNSIGNED(arena_ind, 1); arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL) { - ret = EFAULT; - goto label_return; - } - + ret = EFAULT; + goto label_return; + } + if (oldp != NULL && oldlenp != NULL) { size_t oldval = dirty ? 
arena_dirty_decay_ms_get(arena) : arena_muzzy_decay_ms_get(arena); @@ -2348,18 +2348,18 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, } } - ret = 0; -label_return: + ret = 0; +label_return: return ret; -} - +} + static int arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, newlen, true); } - + static int arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { @@ -2414,14 +2414,14 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, } } else { ret = EFAULT; - goto label_return; - } + goto label_return; + } ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } - + static int arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, @@ -2477,46 +2477,46 @@ arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, break; } - ret = super_arena_i_node; -label_return: + ret = super_arena_i_node; +label_return: malloc_mutex_unlock(tsdn, &ctl_mtx); return ret; -} - -/******************************************************************************/ - -static int +} + +/******************************************************************************/ + +static int arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; - unsigned narenas; - + int ret; + unsigned narenas; + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - READONLY(); - if (*oldlenp != sizeof(unsigned)) { - ret = EINVAL; - goto label_return; - } + READONLY(); + if (*oldlenp != sizeof(unsigned)) { + ret = EINVAL; + goto label_return; + } narenas = ctl_arenas->narenas; - READ(narenas, unsigned); - - ret = 0; -label_return: + READ(narenas, unsigned); + + ret = 0; +label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; -} - -static int +} + +static int arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { - int ret; - + int ret; + if (oldp != NULL && oldlenp != NULL) { size_t oldval = (dirty ? 
arena_dirty_decay_ms_default_get() : arena_muzzy_decay_ms_default_get()); READ(oldval, ssize_t); - } + } if (newp != NULL) { if (newlen != sizeof(ssize_t)) { ret = EINVAL; @@ -2528,12 +2528,12 @@ arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, goto label_return; } } - + ret = 0; -label_return: +label_return: return ret; -} - +} + static int arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { @@ -2548,8 +2548,8 @@ arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, newlen, false); } -CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) -CTL_RO_NL_GEN(arenas_page, PAGE, size_t) +CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) +CTL_RO_NL_GEN(arenas_page, PAGE, size_t) CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned) CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned) @@ -2557,34 +2557,34 @@ CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nshards, bin_infos[mib[2]].n_shards, uint32_t) -static const ctl_named_node_t * +static const ctl_named_node_t * arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > SC_NBINS) { return NULL; } return super_arenas_bin_i_node; -} - +} + CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned) CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(SC_NBINS+(szind_t)mib[2]), size_t) -static const ctl_named_node_t * +static const ctl_named_node_t * arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > SC_NSIZES - SC_NBINS) { return NULL; } return super_arenas_lextent_i_node; -} - -static int +} + +static int arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; extent_hooks_t *extent_hooks; - unsigned arena_ind; - + unsigned arena_ind; + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); extent_hooks = (extent_hooks_t *)&extent_hooks_default; @@ -2592,33 +2592,33 @@ arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) { ret = EAGAIN; goto label_return; - } + } READ(arena_ind, unsigned); - + ret = 0; -label_return: +label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; -} - -static int +} + +static int arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; unsigned arena_ind; void *ptr; extent_t *extent; arena_t *arena; - + ptr = NULL; ret = EINVAL; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(ptr, void *); extent = iealloc(tsd_tsdn(tsd), ptr); if (extent == NULL) - goto label_return; - + goto label_return; + arena = extent_arena_get(extent); if (arena == NULL) goto label_return; @@ -2626,26 +2626,26 @@ arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, arena_ind = arena_ind_get(arena); READ(arena_ind, unsigned); - ret = 0; -label_return: + ret = 0; +label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; -} - -/******************************************************************************/ - -static int +} + +/******************************************************************************/ + +static int prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, 
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; - bool oldval; - + int ret; + bool oldval; + if (!config_prof) { return ENOENT; } - - if (newp != NULL) { + + if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; @@ -2654,18 +2654,18 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, *(bool *)newp); } else { oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); - } - READ(oldval, bool); - - ret = 0; -label_return: + } + READ(oldval, bool); + + ret = 0; +label_return: return ret; -} - -static int +} + +static int prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; bool oldval; if (!config_prof) { @@ -2692,25 +2692,25 @@ static int prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - const char *filename = NULL; - + const char *filename = NULL; + if (!config_prof) { return ENOENT; } - - WRITEONLY(); - WRITE(filename, const char *); - + + WRITEONLY(); + WRITE(filename, const char *); + if (prof_mdump(tsd, filename)) { - ret = EFAULT; - goto label_return; - } - - ret = 0; -label_return: + ret = EFAULT; + goto label_return; + } + + ret = 0; +label_return: return ret; -} - +} + static int prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { @@ -2760,9 +2760,9 @@ label_return: return ret; } -CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) +CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) - + static int prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { @@ -2801,8 +2801,8 @@ prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, return 0; } -/******************************************************************************/ - +/******************************************************************************/ + CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t) CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t) CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t) @@ -2810,14 +2810,14 @@ CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t) CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t) CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t) CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t) - + CTL_RO_CGEN(config_stats, stats_background_thread_num_threads, ctl_stats->background_thread.num_threads, size_t) CTL_RO_CGEN(config_stats, stats_background_thread_num_runs, ctl_stats->background_thread.num_runs, uint64_t) CTL_RO_CGEN(config_stats, stats_background_thread_run_interval, nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t) - + CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *) CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms, ssize_t) @@ -2829,7 +2829,7 @@ CTL_RO_GEN(stats_arenas_i_uptime, CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t) CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t) CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, +CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, 
atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_retained, @@ -2839,7 +2839,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail, atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail, ATOMIC_RELAXED), size_t) - + CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t) @@ -2879,28 +2879,28 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm, atomic_load_zu(&arenas_i(mib[2])->astats->astats.abandoned_vm, ATOMIC_RELAXED), size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, +CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, arenas_i(mib[2])->astats->allocated_small, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, arenas_i(mib[2])->astats->nmalloc_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, +CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, arenas_i(mib[2])->astats->ndalloc_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, arenas_i(mib[2])->astats->nrequests_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills, arenas_i(mib[2])->astats->nfills_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes, arenas_i(mib[2])->astats->nflushes_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, +CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large, ATOMIC_RELAXED), size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, +CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.nrequests_large), uint64_t) /* @@ -2913,7 +2913,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills, CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.nflushes_large), uint64_t) - + /* Lock profiling related APIs below. 
*/ #define RO_MUTEX_CTL_GEN(n, l) \ CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \ @@ -3004,11 +3004,11 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, return 0; } -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t) @@ -3024,8 +3024,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs, arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs, arenas_i(mib[2])->astats->bstats[mib[4]].nonfull_slabs, size_t) - -static const ctl_named_node_t * + +static const ctl_named_node_t * stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) { if (j > SC_NBINS) { @@ -3033,7 +3033,7 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, } return super_stats_arenas_i_bins_j_node; } - + CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t) @@ -3053,8 +3053,8 @@ stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, return NULL; } return super_stats_arenas_i_lextents_j_node; -} - +} + CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty, atomic_load_zu( &arenas_i(mib[2])->astats->estats[mib[4]].ndirty, @@ -3079,8 +3079,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes, atomic_load_zu( &arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, ATOMIC_RELAXED), size_t); - -static const ctl_named_node_t * + +static const ctl_named_node_t * stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) { if (j >= SC_NPSIZES) { @@ -3088,7 +3088,7 @@ stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib, } return super_stats_arenas_i_extents_j_node; } - + static bool ctl_arenas_i_verify(size_t i) { size_t a = arenas_i2a_impl(i, true, true); @@ -3097,24 +3097,24 @@ ctl_arenas_i_verify(size_t i) { } return false; -} - -static const ctl_named_node_t * +} + +static const ctl_named_node_t * stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t *ret; - + malloc_mutex_lock(tsdn, &ctl_mtx); if (ctl_arenas_i_verify(i)) { - ret = NULL; - goto label_return; - } - - ret = super_stats_arenas_i_node; -label_return: + ret = NULL; + goto label_return; + } + + ret = super_stats_arenas_i_node; +label_return: malloc_mutex_unlock(tsdn, &ctl_mtx); return ret; -} +} static int experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, diff --git a/contrib/libs/jemalloc/src/extent.c b/contrib/libs/jemalloc/src/extent.c index 9237f903dc..b4ba16be89 100644 --- a/contrib/libs/jemalloc/src/extent.c +++ b/contrib/libs/jemalloc/src/extent.c @@ -1,7 +1,7 @@ #define JEMALLOC_EXTENT_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" - + #include "jemalloc/internal/assert.h" #include "jemalloc/internal/extent_dss.h" #include 
"jemalloc/internal/extent_mmap.h" @@ -10,15 +10,15 @@ #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_pool.h" -/******************************************************************************/ +/******************************************************************************/ /* Data. */ - + rtree_t extents_rtree; /* Keyed by the address of the extent_t being protected. */ mutex_pool_t extent_mutex_pool; - + size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT; - + static const bitmap_info_t extents_bitmap_info = BITMAP_INFO_INITIALIZER(SC_NPSIZES+1); @@ -136,8 +136,8 @@ extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm, if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn, &extents_rtree, elm, true))) { return lock_result_no_extent; - } - + } + /* * It's possible that the extent changed out from under us, and with it * the leaf->extent mapping. We have to recheck while holding the lock. @@ -153,8 +153,8 @@ extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm, extent_unlock(tsdn, extent1); return lock_result_failure; } -} - +} + /* * Returns a pool-locked extent_t * if there's one associated with the given * address, and NULL otherwise. @@ -175,7 +175,7 @@ extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr, } while (lock_result == lock_result_failure); return ret; } - + extent_t * extent_alloc(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); @@ -189,15 +189,15 @@ extent_alloc(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); return extent; } - + void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); extent_avail_insert(&arena->extent_avail, extent); atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED); malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); -} - +} + extent_hooks_t * extent_hooks_get(arena_t *arena) { return base_extent_hooks_get(arena->base); diff --git a/contrib/libs/jemalloc/src/jemalloc.c b/contrib/libs/jemalloc/src/jemalloc.c index 63ff26b656..1e9a3ba934 100644 --- a/contrib/libs/jemalloc/src/jemalloc.c +++ b/contrib/libs/jemalloc/src/jemalloc.c @@ -1,7 +1,7 @@ #define JEMALLOC_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" - + #include "jemalloc/internal/assert.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/ctl.h" @@ -20,22 +20,22 @@ #include "jemalloc/internal/ticker.h" #include "jemalloc/internal/util.h" -/******************************************************************************/ -/* Data. */ - -/* Runtime configuration options. */ +/******************************************************************************/ +/* Data. */ + +/* Runtime configuration options. */ const char *je_malloc_conf #ifndef _WIN32 JEMALLOC_ATTR(weak) #endif ; -bool opt_abort = -#ifdef JEMALLOC_DEBUG - true -#else - false -#endif - ; +bool opt_abort = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; bool opt_abort_conf = #ifdef JEMALLOC_DEBUG true @@ -46,7 +46,7 @@ bool opt_abort_conf = /* Intentionally default off, even with debug builds. 
*/ bool opt_confirm_conf = false; const char *opt_junk = -#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) "true" #else "false" @@ -54,11 +54,11 @@ const char *opt_junk = ; bool opt_junk_alloc = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) - true -#else - false -#endif - ; + true +#else + false +#endif + ; bool opt_junk_free = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) true @@ -67,13 +67,13 @@ bool opt_junk_free = #endif ; -bool opt_utrace = false; -bool opt_xmalloc = false; -bool opt_zero = false; +bool opt_utrace = false; +bool opt_xmalloc = false; +bool opt_zero = false; unsigned opt_narenas = 0; - -unsigned ncpus; - + +unsigned ncpus; + /* Protects arenas initialization. */ malloc_mutex_t arenas_lock; /* @@ -91,9 +91,9 @@ atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; static atomic_u_t narenas_total; /* Use narenas_total_*(). */ /* Below three are read-only after initialization. */ static arena_t *a0; /* arenas[0]. */ -unsigned narenas_auto; +unsigned narenas_auto; unsigned manual_arena_base; - + typedef enum { malloc_init_uninitialized = 3, malloc_init_a0_initialized = 2, @@ -101,7 +101,7 @@ typedef enum { malloc_init_initialized = 0 /* Common case --> jnz. */ } malloc_init_t; static malloc_init_t malloc_init_state = malloc_init_uninitialized; - + /* False should be the common case. Set to true to trigger initialization. */ bool malloc_slow = true; @@ -115,29 +115,29 @@ enum { }; static uint8_t malloc_slow_flags; -#ifdef JEMALLOC_THREADED_INIT -/* Used to let the initializing thread recursively allocate. */ -# define NO_INITIALIZER ((unsigned long)0) -# define INITIALIZER pthread_self() -# define IS_INITIALIZER (malloc_initializer == pthread_self()) -static pthread_t malloc_initializer = NO_INITIALIZER; -#else -# define NO_INITIALIZER false -# define INITIALIZER true -# define IS_INITIALIZER malloc_initializer -static bool malloc_initializer = NO_INITIALIZER; -#endif - -/* Used to avoid initialization races. */ -#ifdef _WIN32 +#ifdef JEMALLOC_THREADED_INIT +/* Used to let the initializing thread recursively allocate. */ +# define NO_INITIALIZER ((unsigned long)0) +# define INITIALIZER pthread_self() +# define IS_INITIALIZER (malloc_initializer == pthread_self()) +static pthread_t malloc_initializer = NO_INITIALIZER; +#else +# define NO_INITIALIZER false +# define INITIALIZER true +# define IS_INITIALIZER malloc_initializer +static bool malloc_initializer = NO_INITIALIZER; +#endif + +/* Used to avoid initialization races. */ +#ifdef _WIN32 #if _WIN32_WINNT >= 0x0600 static malloc_mutex_t init_lock = SRWLOCK_INIT; #else -static malloc_mutex_t init_lock; +static malloc_mutex_t init_lock; static bool init_lock_initialized = false; - -JEMALLOC_ATTR(constructor) -static void WINAPI + +JEMALLOC_ATTR(constructor) +static void WINAPI _init_init_lock(void) { /* * If another constructor in the same binary is using mallctl to e.g. 
@@ -154,57 +154,57 @@ _init_init_lock(void) { malloc_mutex_rank_exclusive); } init_lock_initialized = true; -} - -#ifdef _MSC_VER -# pragma section(".CRT$XCU", read) -JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) -static const void (WINAPI *init_init_lock)(void) = _init_init_lock; -#endif +} + +#ifdef _MSC_VER +# pragma section(".CRT$XCU", read) +JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) +static const void (WINAPI *init_init_lock)(void) = _init_init_lock; +#endif #endif -#else -static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; -#endif - -typedef struct { - void *p; /* Input pointer (as in realloc(p, s)). */ - size_t s; /* Request size. */ - void *r; /* Result pointer. */ -} malloc_utrace_t; - -#ifdef JEMALLOC_UTRACE -# define UTRACE(a, b, c) do { \ +#else +static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; +#endif + +typedef struct { + void *p; /* Input pointer (as in realloc(p, s)). */ + size_t s; /* Request size. */ + void *r; /* Result pointer. */ +} malloc_utrace_t; + +#ifdef JEMALLOC_UTRACE +# define UTRACE(a, b, c) do { \ if (unlikely(opt_utrace)) { \ - int utrace_serrno = errno; \ - malloc_utrace_t ut; \ - ut.p = (a); \ - ut.s = (b); \ - ut.r = (c); \ - utrace(&ut, sizeof(ut)); \ - errno = utrace_serrno; \ - } \ -} while (0) -#else -# define UTRACE(a, b, c) -#endif - + int utrace_serrno = errno; \ + malloc_utrace_t ut; \ + ut.p = (a); \ + ut.s = (b); \ + ut.r = (c); \ + utrace(&ut, sizeof(ut)); \ + errno = utrace_serrno; \ + } \ +} while (0) +#else +# define UTRACE(a, b, c) +#endif + /* Whether encountered any invalid config options. */ static bool had_conf_error = false; -/******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition. - */ - +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + static bool malloc_init_hard_a0(void); -static bool malloc_init_hard(void); - -/******************************************************************************/ -/* - * Begin miscellaneous support functions. - */ - +static bool malloc_init_hard(void); + +/******************************************************************************/ +/* + * Begin miscellaneous support functions. + */ + bool malloc_initialized(void) { return (malloc_init_state == malloc_init_initialized); @@ -313,7 +313,7 @@ narenas_total_get(void) { return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE); } -/* Create a new arena and insert it into the arenas array at index ind. */ +/* Create a new arena and insert it into the arenas array at index ind. 
*/ static arena_t * arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; @@ -360,10 +360,10 @@ arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { } } -arena_t * +arena_t * arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; - + malloc_mutex_lock(tsdn, &arenas_lock); arena = arena_init_locked(tsdn, ind, extent_hooks); malloc_mutex_unlock(tsdn, &arenas_lock); @@ -390,9 +390,9 @@ arena_bind(tsd_t *tsd, unsigned ind, bool internal) { bin_infos[i].n_shards <= BIN_SHARDS_MAX); bins->binshard[i] = shard % bin_infos[i].n_shards; } - } + } } - + void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { arena_t *oldarena, *newarena; @@ -426,10 +426,10 @@ arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); unsigned narenas_actual = narenas_total_get(); - /* + /* * Dissociate old tdata array (and set up for deallocation upon return) * if it's too small. - */ + */ if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { arenas_tdata_old = arenas_tdata; narenas_tdata_old = narenas_tdata; @@ -441,7 +441,7 @@ arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { arenas_tdata_old = NULL; narenas_tdata_old = 0; } - + /* Allocate tdata array if it's missing. */ if (arenas_tdata == NULL) { bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); @@ -492,13 +492,13 @@ label_return: a0dalloc(arenas_tdata_old); } return tdata; -} - +} + /* Slow path, called only by arena_choose(). */ -arena_t * +arena_t * arena_choose_hard(tsd_t *tsd, bool internal) { arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); - + if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { unsigned choose = percpu_arena_choose(); ret = arena_get(tsd_tsdn(tsd), choose, true); @@ -509,10 +509,10 @@ arena_choose_hard(tsd_t *tsd, bool internal) { return ret; } - if (narenas_auto > 1) { + if (narenas_auto > 1) { unsigned i, j, choose[2], first_null; bool is_new_arena[2]; - + /* * Determine binding for both non-internal and internal * allocation. @@ -526,15 +526,15 @@ arena_choose_hard(tsd_t *tsd, bool internal) { is_new_arena[j] = false; } - first_null = narenas_auto; + first_null = narenas_auto; malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); - for (i = 1; i < narenas_auto; i++) { + for (i = 1; i < narenas_auto; i++) { if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { - /* - * Choose the first arena that has the lowest - * number of threads assigned to it. - */ + /* + * Choose the first arena that has the lowest + * number of threads assigned to it. + */ for (j = 0; j < 2; j++) { if (arena_nthreads_get(arena_get( tsd_tsdn(tsd), i, false), !!j) < @@ -544,20 +544,20 @@ arena_choose_hard(tsd_t *tsd, bool internal) { choose[j] = i; } } - } else if (first_null == narenas_auto) { - /* - * Record the index of the first uninitialized - * arena, in case all extant arenas are in use. - * - * NB: It is possible for there to be - * discontinuities in terms of initialized - * versus uninitialized arenas, due to the - * "thread.arena" mallctl. - */ - first_null = i; - } - } - + } else if (first_null == narenas_auto) { + /* + * Record the index of the first uninitialized + * arena, in case all extant arenas are in use. + * + * NB: It is possible for there to be + * discontinuities in terms of initialized + * versus uninitialized arenas, due to the + * "thread.arena" mallctl. 
+ */ + first_null = i; + } + } + for (j = 0; j < 2; j++) { if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), choose[j], false), !!j) == 0 || first_null == @@ -589,7 +589,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) { } } arena_bind(tsd, choose[j], !!j); - } + } malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); for (j = 0; j < 2; j++) { @@ -600,15 +600,15 @@ arena_choose_hard(tsd_t *tsd, bool internal) { } } - } else { + } else { ret = arena_get(tsd_tsdn(tsd), 0, false); arena_bind(tsd, 0, false); arena_bind(tsd, 0, true); - } - + } + return ret; } - + void iarena_cleanup(tsd_t *tsd) { arena_t *iarena; @@ -617,8 +617,8 @@ iarena_cleanup(tsd_t *tsd) { if (iarena != NULL) { arena_unbind(tsd, arena_ind_get(iarena), true); } -} - +} + void arena_cleanup(tsd_t *tsd) { arena_t *arena; @@ -643,39 +643,39 @@ arenas_tdata_cleanup(tsd_t *tsd) { } } -static void +static void stats_print_atexit(void) { if (config_stats) { tsdn_t *tsdn; - unsigned narenas, i; - + unsigned narenas, i; + tsdn = tsdn_fetch(); - /* - * Merge stats from extant threads. This is racy, since - * individual threads do not lock when recording tcache stats - * events. As a consequence, the final stats may be slightly - * out of date by the time they are reported, if other threads - * continue to allocate. - */ - for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + /* + * Merge stats from extant threads. This is racy, since + * individual threads do not lock when recording tcache stats + * events. As a consequence, the final stats may be slightly + * out of date by the time they are reported, if other threads + * continue to allocate. + */ + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena = arena_get(tsdn, i, false); - if (arena != NULL) { - tcache_t *tcache; - + if (arena != NULL) { + tcache_t *tcache; + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); - ql_foreach(tcache, &arena->tcache_ql, link) { + ql_foreach(tcache, &arena->tcache_ql, link) { tcache_stats_merge(tsdn, tcache, arena); - } + } malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); - } - } - } + } + } + } je_malloc_stats_print(NULL, NULL, opt_stats_print_opts); -} - -/* +} + +/* * Ensure that we don't hold any locks upon entry to or exit from allocator * code (in a "broad" sense that doesn't count a reentrant allocation as an * entrance or exit). @@ -701,13 +701,13 @@ check_entry_exit_locking(tsdn_t *tsdn) { } /* - * End miscellaneous support functions. - */ -/******************************************************************************/ -/* - * Begin initialization functions. - */ - + * End miscellaneous support functions. + */ +/******************************************************************************/ +/* + * Begin initialization functions. + */ + static char * jemalloc_secure_getenv(const char *name) { #ifdef JEMALLOC_HAVE_SECURE_GETENV @@ -722,14 +722,14 @@ jemalloc_secure_getenv(const char *name) { #endif } -static unsigned +static unsigned malloc_ncpus(void) { - long result; - -#ifdef _WIN32 - SYSTEM_INFO si; - GetSystemInfo(&si); - result = si.dwNumberOfProcessors; + long result; + +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + result = si.dwNumberOfProcessors; #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) /* * glibc >= 2.6 has the CPU_COUNT macro. @@ -744,17 +744,17 @@ malloc_ncpus(void) { pthread_getaffinity_np(pthread_self(), sizeof(set), &set); result = CPU_COUNT(&set); } -#else - result = sysconf(_SC_NPROCESSORS_ONLN); -#endif - return ((result == -1) ? 
1 : (unsigned)result); -} - +#else + result = sysconf(_SC_NPROCESSORS_ONLN); +#endif + return ((result == -1) ? 1 : (unsigned)result); +} + static void init_opt_stats_print_opts(const char *v, size_t vlen) { size_t opts_len = strlen(opt_stats_print_opts); assert(opts_len <= stats_print_tot_num_options); - + for (size_t i = 0; i < vlen; i++) { switch (v[i]) { #define OPTION(o, v, d, s) case o: break; @@ -762,19 +762,19 @@ init_opt_stats_print_opts(const char *v, size_t vlen) { #undef OPTION default: continue; } - + if (strchr(opt_stats_print_opts, v[i]) != NULL) { /* Ignore repeated. */ continue; } - + opt_stats_print_opts[opts_len++] = v[i]; opt_stats_print_opts[opts_len] = '\0'; assert(opts_len <= stats_print_tot_num_options); } assert(opts_len == strlen(opt_stats_print_opts)); -} - +} + /* Reads the next size pair in a multi-sized option. */ static bool malloc_conf_multi_sizes_next(const char **slab_size_segment_cur, @@ -782,9 +782,9 @@ malloc_conf_multi_sizes_next(const char **slab_size_segment_cur, const char *cur = *slab_size_segment_cur; char *end; uintmax_t um; - + set_errno(0); - + /* First number, then '-' */ um = malloc_strtoumax(cur, &end, 0); if (get_errno() != 0 || *end != '-') { @@ -817,84 +817,84 @@ malloc_conf_multi_sizes_next(const char **slab_size_segment_cur, *slab_size_segment_cur = end; return false; -} - -static bool -malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, +} + +static bool +malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, char const **v_p, size_t *vlen_p) { - bool accept; - const char *opts = *opts_p; - - *k_p = opts; - + bool accept; + const char *opts = *opts_p; + + *k_p = opts; + for (accept = false; !accept;) { - switch (*opts) { - case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': - case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': - case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': - case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': - case 'Y': case 'Z': - case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': - case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': - case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': - case 's': case 't': case 'u': case 'v': case 'w': case 'x': - case 'y': case 'z': - case '0': case '1': case '2': case '3': case '4': case '5': - case '6': case '7': case '8': case '9': - case '_': - opts++; - break; - case ':': - opts++; - *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; - *v_p = opts; - accept = true; - break; - case '\0': - if (opts != *opts_p) { - malloc_write("<jemalloc>: Conf string ends " - "with key\n"); - } + switch (*opts) { + case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': + case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': + case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': + case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': + case 'Y': case 'Z': + case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': + case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': + case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': + case 's': case 't': case 'u': case 'v': case 'w': case 'x': + case 'y': case 'z': + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': case '8': case '9': + case '_': + opts++; + break; + case ':': + opts++; + *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; + *v_p = opts; + accept = true; + break; + case '\0': + if (opts != *opts_p) { + malloc_write("<jemalloc>: Conf string ends " + "with key\n"); + } 
return true; - default: - malloc_write("<jemalloc>: Malformed conf string\n"); + default: + malloc_write("<jemalloc>: Malformed conf string\n"); return true; - } - } - + } + } + for (accept = false; !accept;) { - switch (*opts) { - case ',': - opts++; - /* - * Look ahead one character here, because the next time - * this function is called, it will assume that end of - * input has been cleanly reached if no input remains, - * but we have optimistically already consumed the - * comma if one exists. - */ - if (*opts == '\0') { - malloc_write("<jemalloc>: Conf string ends " - "with comma\n"); - } - *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; - accept = true; - break; - case '\0': - *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; - accept = true; - break; - default: - opts++; - break; - } - } - - *opts_p = opts; + switch (*opts) { + case ',': + opts++; + /* + * Look ahead one character here, because the next time + * this function is called, it will assume that end of + * input has been cleanly reached if no input remains, + * but we have optimistically already consumed the + * comma if one exists. + */ + if (*opts == '\0') { + malloc_write("<jemalloc>: Conf string ends " + "with comma\n"); + } + *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; + accept = true; + break; + case '\0': + *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; + accept = true; + break; + default: + opts++; + break; + } + } + + *opts_p = opts; return false; -} - -static void +} + +static void malloc_abort_invalid_conf(void) { assert(opt_abort_conf); malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf " @@ -903,10 +903,10 @@ malloc_abort_invalid_conf(void) { } static void -malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, +malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, size_t vlen) { - malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, - (int)vlen, v); + malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, + (int)vlen, v); /* If abort_conf is set, error out after processing all options. */ const char *experimental = "experimental_"; if (strncmp(k, experimental, strlen(experimental)) == 0) { @@ -914,14 +914,14 @@ malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, return; } had_conf_error = true; -} - -static void +} + +static void malloc_slow_flag_init(void) { - /* + /* * Combine the runtime options into malloc_slow for fast path. Called * after processing all the options. - */ + */ malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) | (opt_junk_free ? flag_opt_junk_free : 0) | (opt_zero ? flag_opt_zero : 0) @@ -943,9 +943,9 @@ obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) { * syscalls on init. */ assert(read_source++ == which_source); - } + } assert(which_source < MALLOC_CONF_NSOURCES); - + const char *ret; switch (which_source) { case 0: @@ -962,16 +962,16 @@ obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) { break; case 2: { ssize_t linklen = 0; -#ifndef _WIN32 +#ifndef _WIN32 int saved_errno = errno; const char *linkname = -# ifdef JEMALLOC_PREFIX +# ifdef JEMALLOC_PREFIX "/etc/"JEMALLOC_PREFIX"malloc.conf" -# else +# else "/etc/malloc.conf" -# endif +# endif ; - + /* * Try to use the contents of the "/etc/malloc.conf" symbolic * link's name. 
@@ -980,7 +980,7 @@ obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) { linklen = readlink(linkname, buf, PATH_MAX); #else linklen = readlinkat(AT_FDCWD, linkname, buf, PATH_MAX); -#endif +#endif if (linklen == -1) { /* No configuration specified. */ linklen = 0; @@ -993,13 +993,13 @@ obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) { break; } case 3: { const char *envname = -#ifdef JEMALLOC_PREFIX +#ifdef JEMALLOC_PREFIX JEMALLOC_CPREFIX"MALLOC_CONF" -#else +#else "MALLOC_CONF" -#endif +#endif ; - + if ((ret = jemalloc_secure_getenv(envname)) != NULL) { /* * Do nothing; opts is already initialized to the value @@ -1008,7 +1008,7 @@ obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) { } else { /* No configuration specified. */ ret = NULL; - } + } break; } default: not_reached(); @@ -1016,7 +1016,7 @@ obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) { } return ret; } - + static void malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES], @@ -1073,15 +1073,15 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], #define CONF_HANDLE_BOOL(o, n) \ if (CONF_MATCH(n)) { \ if (CONF_MATCH_VALUE("true")) { \ - o = true; \ + o = true; \ } else if (CONF_MATCH_VALUE("false")) { \ - o = false; \ + o = false; \ } else { \ CONF_ERROR("Invalid conf value",\ - k, klen, v, vlen); \ - } \ + k, klen, v, vlen); \ + } \ CONF_CONTINUE; \ - } + } /* * One of the CONF_MIN macros below expands, in one of the use points, * to "unsigned integer < 0", which is always false, triggering the @@ -1096,16 +1096,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], #define CONF_CHECK_MAX(um, max) ((um) > (max)) #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ if (CONF_MATCH(n)) { \ - uintmax_t um; \ - char *end; \ - \ - set_errno(0); \ - um = malloc_strtoumax(v, &end, 0); \ - if (get_errno() != 0 || (uintptr_t)end -\ - (uintptr_t)v != vlen) { \ + uintmax_t um; \ + char *end; \ + \ + set_errno(0); \ + um = malloc_strtoumax(v, &end, 0); \ + if (get_errno() != 0 || (uintptr_t)end -\ + (uintptr_t)v != vlen) { \ CONF_ERROR("Invalid conf value",\ - k, klen, v, vlen); \ - } else if (clip) { \ + k, klen, v, vlen); \ + } else if (clip) { \ if (check_min(um, (t)(min))) { \ o = (t)(min); \ } else if ( \ @@ -1114,19 +1114,19 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } else { \ o = (t)um; \ } \ - } else { \ + } else { \ if (check_min(um, (t)(min)) || \ check_max(um, (t)(max))) { \ CONF_ERROR( \ - "Out-of-range " \ - "conf value", \ - k, klen, v, vlen); \ + "Out-of-range " \ + "conf value", \ + k, klen, v, vlen); \ } else { \ o = (t)um; \ } \ - } \ + } \ CONF_CONTINUE; \ - } + } #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ clip) \ CONF_HANDLE_T_U(unsigned, o, n, min, max, \ @@ -1136,35 +1136,35 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], check_min, check_max, clip) #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ if (CONF_MATCH(n)) { \ - long l; \ - char *end; \ - \ - set_errno(0); \ - l = strtol(v, &end, 0); \ - if (get_errno() != 0 || (uintptr_t)end -\ - (uintptr_t)v != vlen) { \ + long l; \ + char *end; \ + \ + set_errno(0); \ + l = strtol(v, &end, 0); \ + if (get_errno() != 0 || (uintptr_t)end -\ + (uintptr_t)v != vlen) { \ CONF_ERROR("Invalid conf value",\ - k, klen, v, vlen); \ + k, klen, v, vlen); 
\ } else if (l < (ssize_t)(min) || l > \ (ssize_t)(max)) { \ CONF_ERROR( \ - "Out-of-range conf value", \ - k, klen, v, vlen); \ + "Out-of-range conf value", \ + k, klen, v, vlen); \ } else { \ - o = l; \ + o = l; \ } \ CONF_CONTINUE; \ - } + } #define CONF_HANDLE_CHAR_P(o, n, d) \ if (CONF_MATCH(n)) { \ - size_t cpylen = (vlen <= \ - sizeof(o)-1) ? vlen : \ - sizeof(o)-1; \ - strncpy(o, v, cpylen); \ - o[cpylen] = '\0'; \ + size_t cpylen = (vlen <= \ + sizeof(o)-1) ? vlen : \ + sizeof(o)-1; \ + strncpy(o, v, cpylen); \ + o[cpylen] = '\0'; \ CONF_CONTINUE; \ - } - + } + bool cur_opt_valid = true; CONF_HANDLE_BOOL(opt_confirm_conf, "confirm_conf") @@ -1172,7 +1172,7 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], continue; } - CONF_HANDLE_BOOL(opt_abort, "abort") + CONF_HANDLE_BOOL(opt_abort, "abort") CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") if (strncmp("metadata_thp", k, klen) == 0) { int i; @@ -1192,30 +1192,30 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], CONF_CONTINUE; } CONF_HANDLE_BOOL(opt_retain, "retain") - if (strncmp("dss", k, klen) == 0) { - int i; - bool match = false; - for (i = 0; i < dss_prec_limit; i++) { - if (strncmp(dss_prec_names[i], v, vlen) - == 0) { + if (strncmp("dss", k, klen) == 0) { + int i; + bool match = false; + for (i = 0; i < dss_prec_limit; i++) { + if (strncmp(dss_prec_names[i], v, vlen) + == 0) { if (extent_dss_prec_set(i)) { CONF_ERROR( - "Error setting dss", - k, klen, v, vlen); - } else { - opt_dss = - dss_prec_names[i]; - match = true; - break; - } - } - } + "Error setting dss", + k, klen, v, vlen); + } else { + opt_dss = + dss_prec_names[i]; + match = true; + break; + } + } + } if (!match) { CONF_ERROR("Invalid conf value", - k, klen, v, vlen); - } + k, klen, v, vlen); + } CONF_CONTINUE; - } + } CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, UINT_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, false) @@ -1249,12 +1249,12 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < QU(SSIZE_MAX) ? 
NSTIME_SEC_MAX * KQU(1000) : SSIZE_MAX); - CONF_HANDLE_BOOL(opt_stats_print, "stats_print") + CONF_HANDLE_BOOL(opt_stats_print, "stats_print") if (CONF_MATCH("stats_print_opts")) { init_opt_stats_print_opts(v, vlen); CONF_CONTINUE; } - if (config_fill) { + if (config_fill) { if (CONF_MATCH("junk")) { if (CONF_MATCH_VALUE("true")) { opt_junk = "true"; @@ -1279,14 +1279,14 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } CONF_CONTINUE; } - CONF_HANDLE_BOOL(opt_zero, "zero") - } - if (config_utrace) { - CONF_HANDLE_BOOL(opt_utrace, "utrace") - } - if (config_xmalloc) { - CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") - } + CONF_HANDLE_BOOL(opt_zero, "zero") + } + if (config_utrace) { + CONF_HANDLE_BOOL(opt_utrace, "utrace") + } + if (config_xmalloc) { + CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") + } CONF_HANDLE_BOOL(opt_tcache, "tcache") CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", -1, (sizeof(size_t) << 3) - 1) @@ -1328,7 +1328,7 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], k, klen, v, vlen); } CONF_CONTINUE; - } + } CONF_HANDLE_BOOL(opt_background_thread, "background_thread"); CONF_HANDLE_SIZE_T(opt_max_background_threads, @@ -1360,26 +1360,26 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } while (!err && vlen_left > 0); CONF_CONTINUE; } - if (config_prof) { - CONF_HANDLE_BOOL(opt_prof, "prof") - CONF_HANDLE_CHAR_P(opt_prof_prefix, - "prof_prefix", "jeprof") - CONF_HANDLE_BOOL(opt_prof_active, "prof_active") + if (config_prof) { + CONF_HANDLE_BOOL(opt_prof, "prof") + CONF_HANDLE_CHAR_P(opt_prof_prefix, + "prof_prefix", "jeprof") + CONF_HANDLE_BOOL(opt_prof_active, "prof_active") CONF_HANDLE_BOOL(opt_prof_thread_active_init, "prof_thread_active_init") CONF_HANDLE_SIZE_T(opt_lg_prof_sample, "lg_prof_sample", 0, (sizeof(uint64_t) << 3) - 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, true) - CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") - CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, - "lg_prof_interval", -1, - (sizeof(uint64_t) << 3) - 1) - CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") - CONF_HANDLE_BOOL(opt_prof_final, "prof_final") - CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") + CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") + CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, + "lg_prof_interval", -1, + (sizeof(uint64_t) << 3) - 1) + CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") + CONF_HANDLE_BOOL(opt_prof_final, "prof_final") + CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") CONF_HANDLE_BOOL(opt_prof_log, "prof_log") - } + } if (config_log) { if (CONF_MATCH("log")) { size_t cpylen = ( @@ -1416,26 +1416,26 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], #undef CONF_CONTINUE #undef CONF_MATCH #undef CONF_MATCH_VALUE -#undef CONF_HANDLE_BOOL +#undef CONF_HANDLE_BOOL #undef CONF_DONT_CHECK_MIN #undef CONF_CHECK_MIN #undef CONF_DONT_CHECK_MAX #undef CONF_CHECK_MAX #undef CONF_HANDLE_T_U #undef CONF_HANDLE_UNSIGNED -#undef CONF_HANDLE_SIZE_T -#undef CONF_HANDLE_SSIZE_T -#undef CONF_HANDLE_CHAR_P +#undef CONF_HANDLE_SIZE_T +#undef CONF_HANDLE_SSIZE_T +#undef CONF_HANDLE_CHAR_P /* Re-enable diagnostic "-Wtype-limits" */ JEMALLOC_DIAGNOSTIC_POP - } + } if (opt_abort_conf && had_conf_error) { malloc_abort_invalid_conf(); } - } + } atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); -} - +} + static void malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL}; @@ -1449,36 +1449,36 
@@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { #undef MALLOC_CONF_NSOURCES -static bool +static bool malloc_init_hard_needed(void) { if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == malloc_init_recursible)) { - /* - * Another thread initialized the allocator before this one - * acquired init_lock, or this thread is the initializing - * thread, and it is recursively allocating. - */ + /* + * Another thread initialized the allocator before this one + * acquired init_lock, or this thread is the initializing + * thread, and it is recursively allocating. + */ return false; - } -#ifdef JEMALLOC_THREADED_INIT + } +#ifdef JEMALLOC_THREADED_INIT if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { - /* Busy-wait until the initializing thread completes. */ + /* Busy-wait until the initializing thread completes. */ spin_t spinner = SPIN_INITIALIZER; - do { + do { malloc_mutex_unlock(TSDN_NULL, &init_lock); spin_adaptive(&spinner); malloc_mutex_lock(TSDN_NULL, &init_lock); } while (!malloc_initialized()); return false; - } -#endif + } +#endif return true; } static bool malloc_init_hard_a0_locked() { - malloc_initializer = INITIALIZER; - + malloc_initializer = INITIALIZER; + JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS sc_data_t sc_data = {0}; @@ -1500,101 +1500,101 @@ malloc_init_hard_a0_locked() { * it. */ if (config_prof) { - prof_boot0(); + prof_boot0(); } malloc_conf_init(&sc_data, bin_shard_sizes); sz_boot(&sc_data); bin_boot(&sc_data, bin_shard_sizes); - - if (opt_stats_print) { - /* Print statistics at exit. */ - if (atexit(stats_print_atexit) != 0) { - malloc_write("<jemalloc>: Error in atexit()\n"); + + if (opt_stats_print) { + /* Print statistics at exit. */ + if (atexit(stats_print_atexit) != 0) { + malloc_write("<jemalloc>: Error in atexit()\n"); if (opt_abort) { - abort(); + abort(); } - } - } + } + } if (pages_boot()) { return true; - } + } if (base_boot(TSDN_NULL)) { return true; - } + } if (extent_boot()) { return true; } - if (ctl_boot()) { + if (ctl_boot()) { return true; - } + } if (config_prof) { - prof_boot1(); - } + prof_boot1(); + } arena_boot(&sc_data); if (tcache_boot(TSDN_NULL)) { return true; - } + } if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, malloc_mutex_rank_exclusive)) { return true; - } + } hook_boot(); - /* - * Create enough scaffolding to allow recursive allocation in - * malloc_ncpus(). - */ + /* + * Create enough scaffolding to allow recursive allocation in + * malloc_ncpus(). + */ narenas_auto = 1; manual_arena_base = narenas_auto + 1; - memset(arenas, 0, sizeof(arena_t *) * narenas_auto); - /* - * Initialize one arena here. The rest are lazily created in + memset(arenas, 0, sizeof(arena_t *) * narenas_auto); + /* + * Initialize one arena here. The rest are lazily created in * arena_choose_hard(). - */ + */ if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) == NULL) { return true; - } + } a0 = arena_get(TSDN_NULL, 0, false); malloc_init_state = malloc_init_a0_initialized; - + return false; } - + static bool malloc_init_hard_a0(void) { bool ret; - + malloc_mutex_lock(TSDN_NULL, &init_lock); ret = malloc_init_hard_a0_locked(); malloc_mutex_unlock(TSDN_NULL, &init_lock); return ret; } - + /* Initialize data structures which may trigger recursive allocation. 
*/ static bool malloc_init_hard_recursible(void) { malloc_init_state = malloc_init_recursible; - - ncpus = malloc_ncpus(); - + + ncpus = malloc_ncpus(); + #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ !defined(__native_client__)) /* LinuxThreads' pthread_atfork() allocates. */ - if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, - jemalloc_postfork_child) != 0) { - malloc_write("<jemalloc>: Error in pthread_atfork()\n"); + if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, + jemalloc_postfork_child) != 0) { + malloc_write("<jemalloc>: Error in pthread_atfork()\n"); if (opt_abort) { - abort(); + abort(); } return true; - } -#endif - + } +#endif + if (background_thread_boot0()) { return true; } - + return false; } @@ -1609,9 +1609,9 @@ malloc_narenas_default(void) { return ncpus << 2; } else { return 1; - } + } } - + static percpu_arena_mode_t percpu_arena_as_initialized(percpu_arena_mode_t mode) { assert(!malloc_initialized()); @@ -1677,26 +1677,26 @@ malloc_init_narenas(void) { } } } - if (opt_narenas == 0) { + if (opt_narenas == 0) { opt_narenas = malloc_narenas_default(); - } + } assert(opt_narenas > 0); - narenas_auto = opt_narenas; - /* + narenas_auto = opt_narenas; + /* * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). - */ + */ if (narenas_auto >= MALLOCX_ARENA_LIMIT) { narenas_auto = MALLOCX_ARENA_LIMIT - 1; - malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", - narenas_auto); - } + malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", + narenas_auto); + } narenas_total_set(narenas_auto); if (arena_init_huge()) { narenas_total_inc(); } manual_arena_base = narenas_total_get(); - + return false; } @@ -1709,14 +1709,14 @@ static bool malloc_init_hard_finish(void) { if (malloc_mutex_boot()) { return true; - } - + } + malloc_init_state = malloc_init_initialized; malloc_slow_flag_init(); - + return false; -} - +} + static void malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { malloc_mutex_assert_owner(tsdn, &init_lock); @@ -1801,14 +1801,14 @@ malloc_init_hard(void) { return false; } -/* - * End initialization functions. - */ -/******************************************************************************/ -/* +/* + * End initialization functions. + */ +/******************************************************************************/ +/* * Begin allocation-path internal functions and data structures. - */ - + */ + /* * Settings determined by the documented behavior of the allocation functions. */ @@ -1816,7 +1816,7 @@ typedef struct static_opts_s static_opts_t; struct static_opts_s { /* Whether or not allocation size may overflow. */ bool may_overflow; - + /* * Whether or not allocations (with alignment) of size 0 should be * treated as size 1. @@ -1827,7 +1827,7 @@ struct static_opts_s { * bumping). */ bool assert_nonempty_alloc; - + /* * Whether or not to modify the 'result' argument to malloc in case of * error. @@ -1835,17 +1835,17 @@ struct static_opts_s { bool null_out_result_on_error; /* Whether to set errno when we encounter an error condition. */ bool set_errno_on_error; - + /* * The minimum valid alignment for functions requesting aligned storage. */ size_t min_alignment; - + /* The error string to use if we oom. */ const char *oom_string; /* The error string to use if the passed-in alignment is invalid. */ const char *invalid_alignment_string; - + /* * False if we're configured to skip some time-consuming operations. 
* @@ -1872,17 +1872,17 @@ static_opts_init(static_opts_t *static_opts) { static_opts->invalid_alignment_string = ""; static_opts->slow = false; static_opts->usize = false; -} - -/* +} + +/* * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we * should have one constant here per magic value there. Note however that the * representations need not be related. - */ + */ #define TCACHE_IND_NONE ((unsigned)-1) #define TCACHE_IND_AUTOMATIC ((unsigned)-2) #define ARENA_IND_AUTOMATIC ((unsigned)-1) - + typedef struct dynamic_opts_s dynamic_opts_t; struct dynamic_opts_s { void **result; @@ -1913,7 +1913,7 @@ imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, size_t size, size_t usize, szind_t ind) { tcache_t *tcache; arena_t *arena; - + /* Fill in the tcache. */ if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) { if (likely(!sopts->slow)) { @@ -1922,12 +1922,12 @@ imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, assert(tcache == tcache_get(tsd)); } else { tcache = tcache_get(tsd); - } + } } else if (dopts->tcache_ind == TCACHE_IND_NONE) { tcache = NULL; } else { tcache = tcaches_get(tsd, dopts->tcache_ind); - } + } /* Fill in the arena. */ if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { @@ -1939,29 +1939,29 @@ imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, arena = NULL; } else { arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); - } - + } + if (unlikely(dopts->alignment != 0)) { return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, dopts->zero, tcache, arena); } - + return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false, arena, sopts->slow); -} - +} + JEMALLOC_ALWAYS_INLINE void * imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, size_t usize, szind_t ind) { void *ret; - + /* * For small allocations, sampling bumps the usize. If so, we allocate * from the ind_large bucket. */ szind_t ind_large; size_t bumped_usize = usize; - + if (usize <= SC_SMALL_MAXCLASS) { assert(((dopts->alignment == 0) ? sz_s2u(SC_LARGE_MINCLASS) : @@ -1980,12 +1980,12 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, } return ret; -} - -/* +} + +/* * Returns true if the allocation will overflow, and false otherwise. Sets * *size to the product either way. - */ + */ JEMALLOC_ALWAYS_INLINE bool compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, size_t *size) { @@ -1993,13 +1993,13 @@ compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, * This function is just num_items * item_size, except that we may have * to check for overflow. */ - + if (!may_overflow) { assert(dopts->num_items == 1); *size = dopts->item_size; return false; } - + /* A size_t with its high-half bits all set to 1. */ static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); @@ -2048,7 +2048,7 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { /* Compute the amount of memory the user wants. 
*/ if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, &size))) { - goto label_oom; + goto label_oom; } if (unlikely(dopts->alignment < sopts->min_alignment @@ -2069,26 +2069,26 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { assert(usize > 0 && usize <= SC_LARGE_MAXCLASS); } - } else { + } else { if (sopts->bump_empty_aligned_alloc) { if (unlikely(size == 0)) { size = 1; - } - } + } + } usize = sz_sa2u(size, dopts->alignment); dopts->usize = usize; if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { - goto label_oom; - } + goto label_oom; + } } /* Validate the user input. */ if (sopts->assert_nonempty_alloc) { assert (size != 0); } - + check_entry_exit_locking(tsd_tsdn(tsd)); - + /* * If we need to handle reentrancy, we can do it out of a * known-initialized arena (i.e. arena 0). @@ -2136,7 +2136,7 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { if (unlikely(allocation == NULL)) { prof_alloc_rollback(tsd, tctx, true); - goto label_oom; + goto label_oom; } prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); } else { @@ -2151,8 +2151,8 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { if (unlikely(allocation == NULL)) { goto label_oom; } - } - + } + /* * Allocation has been done at this point. We still have some * post-allocation work to do though. @@ -2163,7 +2163,7 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { if (config_stats) { assert(usize == isalloc(tsd_tsdn(tsd), allocation)); *tsd_thread_allocatedp_get(tsd) += usize; - } + } if (sopts->slow) { UTRACE(0, size, allocation); @@ -2174,21 +2174,21 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { *dopts->result = allocation; return 0; -label_oom: +label_oom: if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { malloc_write(sopts->oom_string); - abort(); - } - + abort(); + } + if (sopts->slow) { UTRACE(NULL, size, NULL); } - + check_entry_exit_locking(tsd_tsdn(tsd)); - + if (sopts->set_errno_on_error) { set_errno(ENOMEM); - } + } if (sopts->null_out_result_on_error) { *dopts->result = NULL; @@ -2222,8 +2222,8 @@ label_invalid_alignment: } return EINVAL; -} - +} + JEMALLOC_ALWAYS_INLINE bool imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) { if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { @@ -2234,20 +2234,20 @@ imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) { UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); set_errno(ENOMEM); *dopts->result = NULL; - + return false; } - + return true; -} - +} + /* Returns the errno-style error code of the allocation. */ JEMALLOC_ALWAYS_INLINE int imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { return ENOMEM; } - + /* We always need the tsd. Let's grab it right away. 
*/ tsd_t *tsd = tsd_fetch(); assert(tsd); @@ -2260,21 +2260,21 @@ imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { return ENOMEM; } - + sopts->slow = true; return imalloc_body(sopts, dopts, tsd); } -} - +} + JEMALLOC_NOINLINE -void * +void * malloc_default(size_t size) { - void *ret; + void *ret; static_opts_t sopts; dynamic_opts_t dopts; - + LOG("core.malloc.entry", "size: %zu", size); - + static_opts_init(&sopts); dynamic_opts_init(&dopts); @@ -2287,17 +2287,17 @@ malloc_default(size_t size) { dopts.item_size = size; imalloc(&sopts, &dopts); - /* + /* * Note that this branch gets optimized away -- it immediately follows * the check on tsd_fast that sets sopts.slow. - */ + */ if (sopts.slow) { uintptr_t args[3] = {size}; hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args); - } - + } + LOG("core.malloc.exit", "result: %p", ret); - + return ret; } @@ -2333,8 +2333,8 @@ je_malloc(size_t size) { if (tsd_get_allocates() && unlikely(!malloc_initialized())) { return malloc_default(size); - } - + } + tsd_t *tsd = tsd_get(false); if (unlikely(!tsd || !tsd_fast(tsd) || (size > SC_LOOKUP_MAXCLASS))) { return malloc_default(size); @@ -2371,8 +2371,8 @@ je_malloc(size_t size) { tsd_bytes_until_sample_set(tsd, SSIZE_MAX); } return malloc_default(size); - } - } + } + } cache_bin_t *bin = tcache_small_bin_get(tcache, ind); bool tcache_success; @@ -2391,11 +2391,11 @@ je_malloc(size_t size) { /* Fastpath success */ return ret; - } + } return malloc_default(size); -} - +} + JEMALLOC_EXPORT int JEMALLOC_NOTHROW JEMALLOC_ATTR(nonnull(1)) je_posix_memalign(void **memptr, size_t alignment, size_t size) { @@ -2510,11 +2510,11 @@ je_calloc(size_t num, size_t size) { return ret; } -static void * +static void * irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) { - void *p; - + void *p; + if (tctx == NULL) { return NULL; } @@ -2529,17 +2529,17 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, p = iralloc(tsd, old_ptr, old_usize, usize, 0, false, hook_args); } - + return p; -} - +} + JEMALLOC_ALWAYS_INLINE void * irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) { - void *p; + void *p; bool prof_active; prof_tctx_t *old_tctx, *tctx; - + prof_active = prof_active_get_unlocked(); old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); tctx = prof_alloc_prep(tsd, usize, prof_active, true); @@ -2556,10 +2556,10 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, } prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, old_tctx); - + return p; -} - +} + JEMALLOC_ALWAYS_INLINE void ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { if (!slow_path) { @@ -2579,7 +2579,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); assert(alloc_ctx.szind != SC_NSIZES); - size_t usize; + size_t usize; if (config_prof && opt_prof) { usize = sz_index2size(alloc_ctx.szind); prof_free(tsd, ptr, usize, &alloc_ctx); @@ -2589,7 +2589,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { if (config_stats) { *tsd_thread_deallocatedp_get(tsd) += usize; } - + if (likely(!slow_path)) { idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, false); @@ -2609,9 +2609,9 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t 
*tcache, bool slow_path) { assert(slow_path); } - assert(ptr != NULL); + assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - + alloc_ctx_t alloc_ctx, *ctx; if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) { /* @@ -2641,7 +2641,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { ctx = NULL; } - if (config_prof && opt_prof) { + if (config_prof && opt_prof) { prof_free(tsd, ptr, usize, ctx); } if (config_stats) { @@ -2653,24 +2653,24 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { } else { isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); } -} - +} + JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) je_realloc(void *ptr, size_t arg_size) { - void *ret; + void *ret; tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - size_t old_usize = 0; + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + size_t old_usize = 0; size_t size = arg_size; - + LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); if (unlikely(size == 0)) { - if (ptr != NULL) { - /* realloc(ptr, 0) is equivalent to free(ptr). */ - UTRACE(ptr, 0, 0); + if (ptr != NULL) { + /* realloc(ptr, 0) is equivalent to free(ptr). */ + UTRACE(ptr, 0, 0); tcache_t *tcache; tsd_t *tsd = tsd_fetch(); if (tsd_reentrancy_level_get(tsd) == 0) { @@ -2686,16 +2686,16 @@ je_realloc(void *ptr, size_t arg_size) { LOG("core.realloc.exit", "result: %p", NULL); return NULL; - } - size = 1; - } - + } + size = 1; + } + if (likely(ptr != NULL)) { assert(malloc_initialized() || IS_INITIALIZER); tsd_t *tsd = tsd_fetch(); - + check_entry_exit_locking(tsd_tsdn(tsd)); - + hook_ralloc_args_t hook_args = {true, {(uintptr_t)ptr, (uintptr_t)arg_size, 0, 0}}; @@ -2707,7 +2707,7 @@ je_realloc(void *ptr, size_t arg_size) { assert(alloc_ctx.szind != SC_NSIZES); old_usize = sz_index2size(alloc_ctx.szind); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); - if (config_prof && opt_prof) { + if (config_prof && opt_prof) { usize = sz_s2u(size); if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { @@ -2716,16 +2716,16 @@ je_realloc(void *ptr, size_t arg_size) { ret = irealloc_prof(tsd, ptr, old_usize, usize, &alloc_ctx, &hook_args); } - } else { + } else { if (config_stats) { usize = sz_s2u(size); } ret = iralloc(tsd, ptr, old_usize, size, 0, false, &hook_args); - } + } tsdn = tsd_tsdn(tsd); - } else { - /* realloc(NULL, size) is equivalent to malloc(size). */ + } else { + /* realloc(NULL, size) is equivalent to malloc(size). 
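		 * For example, void *p = realloc(NULL, 64) must behave exactly
		 * like malloc(64), which is why this branch simply routes the
		 * request through imalloc() with malloc-style options.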
*/ static_opts_t sopts; dynamic_opts_t dopts; @@ -2749,16 +2749,16 @@ je_realloc(void *ptr, size_t arg_size) { } return ret; - } - + } + if (unlikely(ret == NULL)) { if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write("<jemalloc>: Error in realloc(): " - "out of memory\n"); - abort(); - } - set_errno(ENOMEM); - } + malloc_write("<jemalloc>: Error in realloc(): " + "out of memory\n"); + abort(); + } + set_errno(ENOMEM); + } if (config_stats && likely(ret != NULL)) { tsd_t *tsd; @@ -2766,16 +2766,16 @@ je_realloc(void *ptr, size_t arg_size) { tsd = tsdn_tsd(tsdn); *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; - } - UTRACE(ptr, size, ret); + } + UTRACE(ptr, size, ret); check_entry_exit_locking(tsdn); LOG("core.realloc.exit", "result: %p", ret); return ret; -} - +} + JEMALLOC_NOINLINE -void +void free_default(void *ptr) { UTRACE(ptr, 0, 0); if (likely(ptr != NULL)) { @@ -2789,7 +2789,7 @@ free_default(void *ptr) { */ tsd_t *tsd = tsd_fetch_min(); check_entry_exit_locking(tsd_tsdn(tsd)); - + tcache_t *tcache; if (likely(tsd_fast(tsd))) { tsd_assert_fast(tsd); @@ -2808,8 +2808,8 @@ free_default(void *ptr) { } check_entry_exit_locking(tsd_tsdn(tsd)); } -} - +} + JEMALLOC_ALWAYS_INLINE bool free_fastpath(void *ptr, size_t size, bool size_hint) { tsd_t *tsd = tsd_get(false); @@ -2878,15 +2878,15 @@ je_free(void *ptr) { LOG("core.free.exit", ""); } -/* - * End malloc(3)-compatible functions. - */ -/******************************************************************************/ -/* - * Begin non-standard override functions. - */ - -#ifdef JEMALLOC_OVERRIDE_MEMALIGN +/* + * End malloc(3)-compatible functions. + */ +/******************************************************************************/ +/* + * Begin non-standard override functions. + */ + +#ifdef JEMALLOC_OVERRIDE_MEMALIGN JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) @@ -2922,10 +2922,10 @@ je_memalign(size_t alignment, size_t size) { LOG("core.memalign.exit", "result: %p", ret); return ret; -} -#endif - -#ifdef JEMALLOC_OVERRIDE_VALLOC +} +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) @@ -2960,19 +2960,19 @@ je_valloc(size_t size) { LOG("core.valloc.exit", "result: %p\n", ret); return ret; -} -#endif - +} +#endif + #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) -/* - * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible - * to inconsistently reference libc's malloc(3)-compatible functions - * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). - * - * These definitions interpose hooks in glibc. The functions are actually - * passed an extra argument for the caller return address, which will be - * ignored. - */ +/* + * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible + * to inconsistently reference libc's malloc(3)-compatible functions + * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). + * + * These definitions interpose hooks in glibc. The functions are actually + * passed an extra argument for the caller return address, which will be + * ignored. 
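 * (For instance, glibc declares __malloc_hook as
 *     void *(*)(size_t size, const void *caller);
 * assigning the one-argument je_malloc below still works because, under
 * the usual C calling conventions, a callee never has to read a trailing
 * argument it does not declare.)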
+ */ #include <features.h> // defines __GLIBC__ if we are compiling against glibc JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; @@ -2980,7 +2980,7 @@ JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = - je_memalign; + je_memalign; # endif # ifdef __GLIBC__ @@ -3016,27 +3016,27 @@ int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); # undef PREALIAS # undef ALIAS # endif -#endif - -/* - * End non-standard override functions. - */ -/******************************************************************************/ -/* - * Begin non-standard functions. - */ - +#endif + +/* + * End non-standard override functions. + */ +/******************************************************************************/ +/* + * Begin non-standard functions. + */ + #ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API - + #define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y #define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y) \ JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) - + typedef struct { void *ptr; size_t size; } smallocx_return_t; - + JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN smallocx_return_t JEMALLOC_NOTHROW /* @@ -3055,17 +3055,17 @@ JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT) smallocx_return_t ret; static_opts_t sopts; dynamic_opts_t dopts; - + LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags); - + static_opts_init(&sopts); dynamic_opts_init(&dopts); - + sopts.assert_nonempty_alloc = true; sopts.null_out_result_on_error = true; sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n"; sopts.usize = true; - + dopts.result = &ret.ptr; dopts.num_items = 1; dopts.item_size = size; @@ -3073,7 +3073,7 @@ JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT) if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); } - + dopts.zero = MALLOCX_ZERO_GET(flags); if ((flags & MALLOCX_TCACHE_MASK) != 0) { @@ -3097,11 +3097,11 @@ JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT) LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size); return ret; -} +} #undef JEMALLOC_SMALLOCX_CONCAT_HELPER #undef JEMALLOC_SMALLOCX_CONCAT_HELPER2 #endif - + JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) @@ -3109,16 +3109,16 @@ je_mallocx(size_t size, int flags) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; - + LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags); - + static_opts_init(&sopts); dynamic_opts_init(&dopts); - + sopts.assert_nonempty_alloc = true; sopts.null_out_result_on_error = true; sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n"; - + dopts.result = &ret; dopts.num_items = 1; dopts.item_size = size; @@ -3126,9 +3126,9 @@ je_mallocx(size_t size, int flags) { if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); } - + dopts.zero = MALLOCX_ZERO_GET(flags); - + if ((flags & MALLOCX_TCACHE_MASK) != 0) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { @@ -3139,28 +3139,28 @@ je_mallocx(size_t size, int flags) { } else { dopts.tcache_ind = TCACHE_IND_AUTOMATIC; } - + if ((flags & MALLOCX_ARENA_MASK) != 0) dopts.arena_ind = MALLOCX_ARENA_GET(flags); - } + } imalloc(&sopts, 
&dopts); if (sopts.slow) { uintptr_t args[3] = {size, flags}; hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret, args); - } + } LOG("core.mallocx.exit", "result: %p", ret); return ret; -} - -static void * +} + +static void * irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) { - void *p; - + void *p; + if (tctx == NULL) { return NULL; } @@ -3172,22 +3172,22 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, return NULL; } arena_prof_promote(tsdn, p, usize); - } else { + } else { p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, tcache, arena, hook_args); - } - + } + return p; -} - +} + JEMALLOC_ALWAYS_INLINE void * irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, size_t alignment, size_t *usize, bool zero, tcache_t *tcache, arena_t *arena, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) { - void *p; + void *p; bool prof_active; prof_tctx_t *old_tctx, *tctx; - + prof_active = prof_active_get_unlocked(); old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); tctx = prof_alloc_prep(tsd, *usize, prof_active, false); @@ -3197,62 +3197,62 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, } else { p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, zero, tcache, arena, hook_args); - } + } if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, false); return NULL; } - + if (p == old_ptr && alignment != 0) { - /* - * The allocation did not move, so it is possible that the size - * class is smaller than would guarantee the requested - * alignment, and that the alignment constraint was - * serendipitously satisfied. Additionally, old_usize may not - * be the same as the current usize because of in-place large - * reallocation. Therefore, query the actual value of usize. - */ + /* + * The allocation did not move, so it is possible that the size + * class is smaller than would guarantee the requested + * alignment, and that the alignment constraint was + * serendipitously satisfied. Additionally, old_usize may not + * be the same as the current usize because of in-place large + * reallocation. Therefore, query the actual value of usize. 
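	 * An illustrative case, with hypothetical sizes: rallocx(p, 40,
	 * MALLOCX_ALIGN(64)) can succeed in place out of a 48-byte size
	 * class whenever p already happened to be 64-byte aligned, even
	 * though that class guarantees no such alignment.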
+ */ *usize = isalloc(tsd_tsdn(tsd), p); - } + } prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, old_usize, old_tctx); - + return p; -} - +} + JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) je_rallocx(void *ptr, size_t size, int flags) { - void *p; + void *p; tsd_t *tsd; size_t usize; size_t old_usize; size_t alignment = MALLOCX_ALIGN_GET(flags); - bool zero = flags & MALLOCX_ZERO; - arena_t *arena; + bool zero = flags & MALLOCX_ZERO; + arena_t *arena; tcache_t *tcache; - + LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, size, flags); - assert(ptr != NULL); - assert(size != 0); + assert(ptr != NULL); + assert(size != 0); assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); - + if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { unsigned arena_ind = MALLOCX_ARENA_GET(flags); arena = arena_get(tsd_tsdn(tsd), arena_ind, true); if (unlikely(arena == NULL)) { goto label_oom; } - } else { - arena = NULL; - } - + } else { + arena = NULL; + } + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { tcache = NULL; @@ -3262,7 +3262,7 @@ je_rallocx(void *ptr, size_t size, int flags) { } else { tcache = tcache_get(tsd); } - + alloc_ctx_t alloc_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, @@ -3273,85 +3273,85 @@ je_rallocx(void *ptr, size_t size, int flags) { hook_ralloc_args_t hook_args = {false, {(uintptr_t)ptr, size, flags, 0}}; - if (config_prof && opt_prof) { + if (config_prof && opt_prof) { usize = (alignment == 0) ? sz_s2u(size) : sz_sa2u(size, alignment); if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { - goto label_oom; + goto label_oom; } p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, zero, tcache, arena, &alloc_ctx, &hook_args); if (unlikely(p == NULL)) { goto label_oom; } - } else { + } else { p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, zero, tcache, arena, &hook_args); if (unlikely(p == NULL)) { - goto label_oom; + goto label_oom; } if (config_stats) { usize = isalloc(tsd_tsdn(tsd), p); } - } + } assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); - - if (config_stats) { + + if (config_stats) { *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; - } - UTRACE(ptr, size, p); + } + UTRACE(ptr, size, p); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.rallocx.exit", "result: %p", p); return p; -label_oom: +label_oom: if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write("<jemalloc>: Error in rallocx(): out of memory\n"); - abort(); - } - UTRACE(ptr, size, 0); + malloc_write("<jemalloc>: Error in rallocx(): out of memory\n"); + abort(); + } + UTRACE(ptr, size, 0); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.rallocx.exit", "result: %p", NULL); return NULL; -} - +} + JEMALLOC_ALWAYS_INLINE size_t ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero) { size_t newsize; - + if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero, &newsize)) { return old_usize; } - + return newsize; -} - -static size_t +} + +static size_t ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { - size_t usize; - + size_t usize; + if (tctx == NULL) { return old_usize; - } + } usize = 
ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, zero); - + return usize; -} - +} + JEMALLOC_ALWAYS_INLINE size_t ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) { size_t usize_max, usize; bool prof_active; prof_tctx_t *old_tctx, *tctx; - + prof_active = prof_active_get_unlocked(); old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); /* @@ -3364,7 +3364,7 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, usize_max = sz_s2u(size+extra); assert(usize_max > 0 && usize_max <= SC_LARGE_MAXCLASS); - } else { + } else { usize_max = sz_sa2u(size+extra, alignment); if (unlikely(usize_max == 0 || usize_max > SC_LARGE_MAXCLASS)) { @@ -3376,9 +3376,9 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, */ usize_max = SC_LARGE_MAXCLASS; } - } + } tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); - + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, size, extra, alignment, zero, tctx); @@ -3394,25 +3394,25 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, old_tctx); return usize; -} - +} + JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, size_t extra, int flags) { tsd_t *tsd; - size_t usize, old_usize; + size_t usize, old_usize; size_t alignment = MALLOCX_ALIGN_GET(flags); - bool zero = flags & MALLOCX_ZERO; - + bool zero = flags & MALLOCX_ZERO; + LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, " "flags: %d", ptr, size, extra, flags); - assert(ptr != NULL); - assert(size != 0); - assert(SIZE_T_MAX - size >= extra); + assert(ptr != NULL); + assert(size != 0); + assert(SIZE_T_MAX - size >= extra); assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); - + alloc_ctx_t alloc_ctx; rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, @@ -3436,44 +3436,44 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) { if (unlikely(SC_LARGE_MAXCLASS - size < extra)) { extra = SC_LARGE_MAXCLASS - size; } - - if (config_prof && opt_prof) { + + if (config_prof && opt_prof) { usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, alignment, zero, &alloc_ctx); - } else { + } else { usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, extra, alignment, zero); - } + } if (unlikely(usize == old_usize)) { - goto label_not_resized; + goto label_not_resized; } - - if (config_stats) { + + if (config_stats) { *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; - } -label_not_resized: + } +label_not_resized: if (unlikely(!tsd_fast(tsd))) { uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags}; hook_invoke_expand(hook_expand_xallocx, ptr, old_usize, usize, (uintptr_t)usize, args); } - UTRACE(ptr, size, ptr); + UTRACE(ptr, size, ptr); check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.xallocx.exit", "result: %zu", usize); return usize; -} - +} + JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) je_sallocx(const void *ptr, int flags) { - size_t usize; + size_t usize; tsdn_t *tsdn; - + LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags); - + assert(malloc_initialized() || IS_INITIALIZER); assert(ptr != NULL); @@ -3485,25 +3485,25 @@ je_sallocx(const void *ptr, int flags) { assert(force_ivsalloc || usize != 0); } else { usize = isalloc(tsdn, ptr); - } - + } + 
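	/* usize now holds the user-visible size of ptr. The ivsalloc()
	 * branch above additionally verifies that ptr refers to a live
	 * allocation: it yields 0 for an unknown pointer, which the assert
	 * tolerates only when force_ivsalloc is set. */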
check_entry_exit_locking(tsdn); LOG("core.sallocx.exit", "result: %zu", usize); return usize; -} - +} + JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags) { LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags); - - assert(ptr != NULL); + + assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - + tsd_t *tsd = tsd_fetch(); bool fast = tsd_fast(tsd); check_entry_exit_locking(tsd_tsdn(tsd)); - + tcache_t *tcache; if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { /* Not allowed to be reentrant and specify a custom tcache. */ @@ -3526,7 +3526,7 @@ je_dallocx(void *ptr, int flags) { } } - UTRACE(ptr, 0, 0); + UTRACE(ptr, 0, 0); if (likely(fast)) { tsd_assert_fast(tsd); ifree(tsd, ptr, tcache, false); @@ -3534,17 +3534,17 @@ je_dallocx(void *ptr, int flags) { uintptr_t args_raw[3] = {(uintptr_t)ptr, flags}; hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw); ifree(tsd, ptr, tcache, true); - } + } check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.dallocx.exit", ""); -} - +} + JEMALLOC_ALWAYS_INLINE size_t inallocx(tsdn_t *tsdn, size_t size, int flags) { check_entry_exit_locking(tsdn); - size_t usize; + size_t usize; if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { usize = sz_s2u(size); } else { @@ -3553,18 +3553,18 @@ inallocx(tsdn_t *tsdn, size_t size, int flags) { check_entry_exit_locking(tsdn); return usize; } - + JEMALLOC_NOINLINE void sdallocx_default(void *ptr, size_t size, int flags) { assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - + tsd_t *tsd = tsd_fetch(); bool fast = tsd_fast(tsd); size_t usize = inallocx(tsd_tsdn(tsd), size, flags); assert(usize == isalloc(tsd_tsdn(tsd), ptr)); check_entry_exit_locking(tsd_tsdn(tsd)); - + tcache_t *tcache; if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { /* Not allowed to be reentrant and specify a custom tcache. 
*/ @@ -3598,48 +3598,48 @@ sdallocx_default(void *ptr, size_t size, int flags) { } check_entry_exit_locking(tsd_tsdn(tsd)); -} - +} + JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, int flags) { LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, size, flags); - + if (flags !=0 || !free_fastpath(ptr, size, true)) { sdallocx_default(ptr, size, flags); } - + LOG("core.sdallocx.exit", ""); -} - +} + void JEMALLOC_NOTHROW je_sdallocx_noflags(void *ptr, size_t size) { LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr, size); - + if (!free_fastpath(ptr, size, true)) { sdallocx_default(ptr, size, 0); } - + LOG("core.sdallocx.exit", ""); -} - +} + JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) je_nallocx(size_t size, int flags) { size_t usize; tsdn_t *tsdn; - + assert(size != 0); - + if (unlikely(malloc_init())) { LOG("core.nallocx.exit", "result: %zu", ZU(0)); return 0; } - + tsdn = tsdn_fetch(); check_entry_exit_locking(tsdn); - + usize = inallocx(tsdn, size, flags); if (unlikely(usize > SC_LARGE_MAXCLASS)) { LOG("core.nallocx.exit", "result: %zu", ZU(0)); @@ -3649,21 +3649,21 @@ je_nallocx(size_t size, int flags) { check_entry_exit_locking(tsdn); LOG("core.nallocx.exit", "result: %zu", usize); return usize; -} - +} + JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; tsd_t *tsd; - + LOG("core.mallctl.entry", "name: %s", name); - + if (unlikely(malloc_init())) { LOG("core.mallctl.exit", "result: %d", EAGAIN); return EAGAIN; } - + tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); @@ -3671,19 +3671,19 @@ je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, LOG("core.mallctl.exit", "result: %d", ret); return ret; -} - +} + JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { int ret; - + LOG("core.mallctlnametomib.entry", "name: %s", name); - + if (unlikely(malloc_init())) { LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN); return EAGAIN; } - + tsd_t *tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); ret = ctl_nametomib(tsd, name, mibp, miblenp); @@ -3691,20 +3691,20 @@ je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { LOG("core.mallctlnametomib.exit", "result: %d", ret); return ret; -} - +} + JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; tsd_t *tsd; - + LOG("core.mallctlbymib.entry", ""); - + if (unlikely(malloc_init())) { LOG("core.mallctlbymib.exit", "result: %d", EAGAIN); return EAGAIN; - } + } tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); @@ -3712,13 +3712,13 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, check_entry_exit_locking(tsd_tsdn(tsd)); LOG("core.mallctlbymib.exit", "result: %d", ret); return ret; -} - +} + JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts) { tsdn_t *tsdn; - + LOG("core.malloc_stats_print.entry", ""); tsdn = tsdn_fetch(); @@ -3726,17 +3726,17 @@ je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, stats_print(write_cb, cbopaque, opts); check_entry_exit_locking(tsdn); LOG("core.malloc_stats_print.exit", ""); -} - +} + JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW 
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { size_t ret; tsdn_t *tsdn; - + LOG("core.malloc_usable_size.entry", "ptr: %p", ptr); - + assert(malloc_initialized() || IS_INITIALIZER); - + tsdn = tsdn_fetch(); check_entry_exit_locking(tsdn); @@ -3754,69 +3754,69 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { check_entry_exit_locking(tsdn); LOG("core.malloc_usable_size.exit", "result: %zu", ret); return ret; -} - -/* +} + +/* * End non-standard functions. - */ -/******************************************************************************/ -/* - * The following functions are used by threading libraries for protection of - * malloc during fork(). - */ - -/* - * If an application creates a thread before doing any allocation in the main - * thread, then calls fork(2) in the main thread followed by memory allocation - * in the child process, a race can occur that results in deadlock within the - * child: the main thread may have forked while the created thread had - * partially initialized the allocator. Ordinarily jemalloc prevents - * fork/malloc races via the following functions it registers during - * initialization using pthread_atfork(), but of course that does no good if - * the allocator isn't fully initialized at fork time. The following library + */ +/******************************************************************************/ +/* + * The following functions are used by threading libraries for protection of + * malloc during fork(). + */ + +/* + * If an application creates a thread before doing any allocation in the main + * thread, then calls fork(2) in the main thread followed by memory allocation + * in the child process, a race can occur that results in deadlock within the + * child: the main thread may have forked while the created thread had + * partially initialized the allocator. Ordinarily jemalloc prevents + * fork/malloc races via the following functions it registers during + * initialization using pthread_atfork(), but of course that does no good if + * the allocator isn't fully initialized at fork time. The following library * constructor is a partial solution to this problem. It may still be possible * to trigger the deadlock described above, but doing so would involve forking * via a library constructor that runs before jemalloc's runs. - */ + */ #ifndef JEMALLOC_JET -JEMALLOC_ATTR(constructor) -static void +JEMALLOC_ATTR(constructor) +static void jemalloc_constructor(void) { - malloc_init(); -} -#endif - -#ifndef JEMALLOC_MUTEX_INIT_CB -void -jemalloc_prefork(void) -#else -JEMALLOC_EXPORT void -_malloc_prefork(void) + malloc_init(); +} #endif -{ + +#ifndef JEMALLOC_MUTEX_INIT_CB +void +jemalloc_prefork(void) +#else +JEMALLOC_EXPORT void +_malloc_prefork(void) +#endif +{ tsd_t *tsd; unsigned i, j, narenas; arena_t *arena; - -#ifdef JEMALLOC_MUTEX_INIT_CB + +#ifdef JEMALLOC_MUTEX_INIT_CB if (!malloc_initialized()) { - return; + return; } -#endif +#endif assert(malloc_initialized()); - + tsd = tsd_fetch(); narenas = narenas_total_get(); witness_prefork(tsd_witness_tsdp_get(tsd)); - /* Acquire all mutexes in a safe order. */ + /* Acquire all mutexes in a safe order. 
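	 * (These hooks run because, as the comment above notes, malloc_init()
	 * registers them with pthread_atfork(); conceptually
	 *
	 *     pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	 *         jemalloc_postfork_child);
	 *
	 * The acquisition order below must match the allocator's global lock
	 * order, or a forking child could inherit mutexes in an inconsistent
	 * state.)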
*/ ctl_prefork(tsd_tsdn(tsd)); tcache_prefork(tsd_tsdn(tsd)); malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); if (have_background_thread) { background_thread_prefork0(tsd_tsdn(tsd)); - } + } prof_prefork0(tsd_tsdn(tsd)); if (have_background_thread) { background_thread_prefork1(tsd_tsdn(tsd)); @@ -3858,39 +3858,39 @@ _malloc_prefork(void) } prof_prefork1(tsd_tsdn(tsd)); tsd_prefork(tsd); -} - -#ifndef JEMALLOC_MUTEX_INIT_CB -void -jemalloc_postfork_parent(void) -#else -JEMALLOC_EXPORT void -_malloc_postfork(void) -#endif -{ +} + +#ifndef JEMALLOC_MUTEX_INIT_CB +void +jemalloc_postfork_parent(void) +#else +JEMALLOC_EXPORT void +_malloc_postfork(void) +#endif +{ tsd_t *tsd; unsigned i, narenas; - -#ifdef JEMALLOC_MUTEX_INIT_CB + +#ifdef JEMALLOC_MUTEX_INIT_CB if (!malloc_initialized()) { - return; + return; } -#endif +#endif assert(malloc_initialized()); - + tsd = tsd_fetch(); - + tsd_postfork_parent(tsd); - + witness_postfork_parent(tsd_witness_tsdp_get(tsd)); /* Release all mutexes, now that fork() has completed. */ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; - + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { arena_postfork_parent(tsd_tsdn(tsd), arena); } - } + } prof_postfork_parent(tsd_tsdn(tsd)); if (have_background_thread) { background_thread_postfork_parent(tsd_tsdn(tsd)); @@ -3898,24 +3898,24 @@ _malloc_postfork(void) malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); tcache_postfork_parent(tsd_tsdn(tsd)); ctl_postfork_parent(tsd_tsdn(tsd)); -} - +} + void jemalloc_postfork_child(void) { tsd_t *tsd; unsigned i, narenas; - + assert(malloc_initialized()); - + tsd = tsd_fetch(); - + tsd_postfork_child(tsd); - + witness_postfork_child(tsd_witness_tsdp_get(tsd)); /* Release all mutexes, now that fork() has completed. */ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; - + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { arena_postfork_child(tsd_tsdn(tsd), arena); } @@ -3927,6 +3927,6 @@ jemalloc_postfork_child(void) { malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); tcache_postfork_child(tsd_tsdn(tsd)); ctl_postfork_child(tsd_tsdn(tsd)); -} - -/******************************************************************************/ +} + +/******************************************************************************/ diff --git a/contrib/libs/jemalloc/src/mutex.c b/contrib/libs/jemalloc/src/mutex.c index 3f920f5b1c..92965c26bd 100644 --- a/contrib/libs/jemalloc/src/mutex.c +++ b/contrib/libs/jemalloc/src/mutex.c @@ -1,48 +1,48 @@ #define JEMALLOC_MUTEX_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" - + #include "jemalloc/internal/assert.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/spin.h" - -#ifndef _CRT_SPINCOUNT + +#ifndef _CRT_SPINCOUNT #define _CRT_SPINCOUNT 4000 -#endif - -/******************************************************************************/ -/* Data. */ - -#ifdef JEMALLOC_LAZY_LOCK -bool isthreaded = false; -#endif -#ifdef JEMALLOC_MUTEX_INIT_CB -static bool postpone_init = true; -static malloc_mutex_t *postponed_mutexes = NULL; -#endif - -/******************************************************************************/ -/* - * We intercept pthread_create() calls in order to toggle isthreaded if the - * process goes multi-threaded. 
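 * (Under JEMALLOC_LAZY_LOCK, locking is skipped while the process is
 * still single-threaded; the interposed pthread_create() below flips
 * isthreaded through pthread_create_wrapper() and forwards all of its
 * arguments to the real pthread_create().)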
- */ - -#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -JEMALLOC_EXPORT int -pthread_create(pthread_t *__restrict thread, - const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), +#endif + +/******************************************************************************/ +/* Data. */ + +#ifdef JEMALLOC_LAZY_LOCK +bool isthreaded = false; +#endif +#ifdef JEMALLOC_MUTEX_INIT_CB +static bool postpone_init = true; +static malloc_mutex_t *postponed_mutexes = NULL; +#endif + +/******************************************************************************/ +/* + * We intercept pthread_create() calls in order to toggle isthreaded if the + * process goes multi-threaded. + */ + +#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) +JEMALLOC_EXPORT int +pthread_create(pthread_t *__restrict thread, + const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), void *__restrict arg) { return pthread_create_wrapper(thread, attr, start_routine, arg); -} -#endif - -/******************************************************************************/ - -#ifdef JEMALLOC_MUTEX_INIT_CB -JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, - void *(calloc_cb)(size_t, size_t)); -#endif - +} +#endif + +/******************************************************************************/ + +#ifdef JEMALLOC_MUTEX_INIT_CB +JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, + void *(calloc_cb)(size_t, size_t)); +#endif + void malloc_mutex_lock_slow(malloc_mutex_t *mutex) { mutex_prof_data_t *data = &mutex->prof_data; @@ -131,44 +131,44 @@ mutex_addr_comp(const witness_t *witness1, void *mutex1, } } -bool +bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank, malloc_mutex_lock_order_t lock_order) { mutex_prof_data_init(&mutex->prof_data); -#ifdef _WIN32 +#ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 InitializeSRWLock(&mutex->lock); # else - if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, + if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, _CRT_SPINCOUNT)) { return true; } # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) mutex->lock = OS_UNFAIR_LOCK_INIT; -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) - if (postpone_init) { - mutex->postponed_next = postponed_mutexes; - postponed_mutexes = mutex; - } else { +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) + if (postpone_init) { + mutex->postponed_next = postponed_mutexes; + postponed_mutexes = mutex; + } else { if (_pthread_mutex_init_calloc_cb(&mutex->lock, bootstrap_calloc) != 0) { return true; } - } -#else - pthread_mutexattr_t attr; - + } +#else + pthread_mutexattr_t attr; + if (pthread_mutexattr_init(&attr) != 0) { return true; } - pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); - if (pthread_mutex_init(&mutex->lock, &attr) != 0) { - pthread_mutexattr_destroy(&attr); + pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); + if (pthread_mutex_init(&mutex->lock, &attr) != 0) { + pthread_mutexattr_destroy(&attr); return true; - } - pthread_mutexattr_destroy(&attr); -#endif + } + pthread_mutexattr_destroy(&attr); +#endif if (config_debug) { mutex->lock_order = lock_order; if (lock_order == malloc_mutex_address_ordered) { @@ -179,45 +179,45 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, } } return false; -} - -void +} + +void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) { malloc_mutex_lock(tsdn, mutex); -} - -void +} + +void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) { malloc_mutex_unlock(tsdn, mutex); -} - -void +} + +void 
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { -#ifdef JEMALLOC_MUTEX_INIT_CB +#ifdef JEMALLOC_MUTEX_INIT_CB malloc_mutex_unlock(tsdn, mutex); -#else +#else if (malloc_mutex_init(mutex, mutex->witness.name, mutex->witness.rank, mutex->lock_order)) { - malloc_printf("<jemalloc>: Error re-initializing mutex in " - "child\n"); + malloc_printf("<jemalloc>: Error re-initializing mutex in " + "child\n"); if (opt_abort) { - abort(); + abort(); } - } -#endif -} - -bool + } +#endif +} + +bool malloc_mutex_boot(void) { -#ifdef JEMALLOC_MUTEX_INIT_CB - postpone_init = false; - while (postponed_mutexes != NULL) { - if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, +#ifdef JEMALLOC_MUTEX_INIT_CB + postpone_init = false; + while (postponed_mutexes != NULL) { + if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, bootstrap_calloc) != 0) { return true; } - postponed_mutexes = postponed_mutexes->postponed_next; - } -#endif + postponed_mutexes = postponed_mutexes->postponed_next; + } +#endif return false; -} +} diff --git a/contrib/libs/jemalloc/src/prof.c b/contrib/libs/jemalloc/src/prof.c index da834b54fb..ca8945f6db 100644 --- a/contrib/libs/jemalloc/src/prof.c +++ b/contrib/libs/jemalloc/src/prof.c @@ -9,44 +9,44 @@ #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/emitter.h" -/******************************************************************************/ - -#ifdef JEMALLOC_PROF_LIBUNWIND +/******************************************************************************/ + +#ifdef JEMALLOC_PROF_LIBUNWIND #define UNW_LOCAL_ONLY #include <libunwind.h> -#endif - -#ifdef JEMALLOC_PROF_LIBGCC +#endif + +#ifdef JEMALLOC_PROF_LIBGCC /* * We have a circular dependency -- jemalloc_internal.h tells us if we should * use libgcc's unwinding functionality, but after we've included that, we've * already hooked _Unwind_Backtrace. We'll temporarily disable hooking. */ #undef _Unwind_Backtrace -#include <unwind.h> +#include <unwind.h> #define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) -#endif - -/******************************************************************************/ -/* Data. */ - -bool opt_prof = false; -bool opt_prof_active = true; +#endif + +/******************************************************************************/ +/* Data. */ + +bool opt_prof = false; +bool opt_prof_active = true; bool opt_prof_thread_active_init = true; -size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; -ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; -bool opt_prof_gdump = false; +size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; +ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; +bool opt_prof_gdump = false; bool opt_prof_final = false; -bool opt_prof_leak = false; -bool opt_prof_accum = false; +bool opt_prof_leak = false; +bool opt_prof_accum = false; bool opt_prof_log = false; -char opt_prof_prefix[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PATH_MAX + -#endif - 1]; - +char opt_prof_prefix[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + /* * Initialized as opt_prof_active, and accessed via * prof_active_[gs]et{_unlocked,}(). 
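 *
 * (An aside on the array-bound idiom used above for opt_prof_prefix and
 * again below for prof_dump_buf: an #ifdef term inside the bound makes
 * the buffer collapse to one byte when the feature is compiled out. A
 * self-contained sketch, names hypothetical:
 *
 *     char prefix_buf[
 *     #ifdef FEATURE_PROF        // stand-in for JEMALLOC_PROF
 *         PATH_MAX +
 *     #endif
 *         1];
 *
 * so non-prof builds carry 1 byte instead of PATH_MAX + 1.)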
@@ -68,8 +68,8 @@ static malloc_mutex_t prof_thread_active_init_mtx; bool prof_gdump_val; static malloc_mutex_t prof_gdump_mtx; -uint64_t prof_interval = 0; - +uint64_t prof_interval = 0; + size_t lg_prof_sample; typedef enum prof_logging_state_e prof_logging_state_t; @@ -79,7 +79,7 @@ enum prof_logging_state_e { prof_logging_state_dumping }; -/* +/* * - stopped: log_start never called, or previous log_stop has completed. * - started: log_start called, log_stop not called yet. Allocations are logged. * - dumping: log_stop called but not finished; samples are not logged anymore. @@ -170,13 +170,13 @@ static malloc_mutex_t log_mtx; * Table of mutexes that are shared among gctx's. These are leaf locks, so * there is no problem with using them for more than one gctx at the same time. * The primary motivation for this sharing though is that gctx's are ephemeral, - * and destroying mutexes causes complications for systems that allocate when - * creating/destroying mutexes. - */ + * and destroying mutexes causes complications for systems that allocate when + * creating/destroying mutexes. + */ static malloc_mutex_t *gctx_locks; static atomic_u_t cum_gctxs; /* Atomic counter. */ - -/* + +/* * Table of mutexes that are shared among tdata's. No operations require * holding multiple tdata locks, so there is no problem with using them for more * than one tdata at the same time, even though a gctx lock may be acquired @@ -186,12 +186,12 @@ static malloc_mutex_t *tdata_locks; /* * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data - * structure that knows about all backtraces currently captured. - */ + * structure that knows about all backtraces currently captured. + */ static ckh_t bt2gctx; /* Non static to enable profiling. */ malloc_mutex_t bt2gctx_mtx; - + /* * Tree of all extant prof_tdata_t structures, regardless of state, * {attached,detached,expired}. @@ -202,37 +202,37 @@ static malloc_mutex_t tdatas_mtx; static uint64_t next_thr_uid; static malloc_mutex_t next_thr_uid_mtx; -static malloc_mutex_t prof_dump_seq_mtx; -static uint64_t prof_dump_seq; -static uint64_t prof_dump_iseq; -static uint64_t prof_dump_mseq; -static uint64_t prof_dump_useq; - -/* - * This buffer is rather large for stack allocation, so use a single buffer for - * all profile dumps. - */ -static malloc_mutex_t prof_dump_mtx; -static char prof_dump_buf[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PROF_DUMP_BUFSIZE -#else - 1 -#endif -]; +static malloc_mutex_t prof_dump_seq_mtx; +static uint64_t prof_dump_seq; +static uint64_t prof_dump_iseq; +static uint64_t prof_dump_mseq; +static uint64_t prof_dump_useq; + +/* + * This buffer is rather large for stack allocation, so use a single buffer for + * all profile dumps. + */ +static malloc_mutex_t prof_dump_mtx; +static char prof_dump_buf[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PROF_DUMP_BUFSIZE +#else + 1 +#endif +]; static size_t prof_dump_buf_end; -static int prof_dump_fd; - -/* Do not dump any profiles until bootstrapping is complete. */ -static bool prof_booted = false; - -/******************************************************************************/ +static int prof_dump_fd; + +/* Do not dump any profiles until bootstrapping is complete. */ +static bool prof_booted = false; + +/******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. 
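 * (C requires a declaration before each call, so these prototypes let
 * later code reference helpers whose definitions appear further down in
 * the file.)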
*/ - + static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, @@ -309,12 +309,12 @@ rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, /******************************************************************************/ -void +void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { prof_tdata_t *tdata; - - cassert(config_prof); - + + cassert(config_prof); + if (updated) { /* * Compute a new sample threshold. This isn't very important in @@ -337,8 +337,8 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } } -} - +} + void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { @@ -440,10 +440,10 @@ prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) { } } -static void +static void prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); - + prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false); if (cons_tdata == NULL) { /* @@ -453,7 +453,7 @@ prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { */ return; } - + malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx); if (prof_logging_state != prof_logging_state_started) { @@ -515,58 +515,58 @@ prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { label_done: malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx); -} - +} + void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); - + assert(tctx->cnts.curobjs > 0); assert(tctx->cnts.curbytes >= usize); tctx->cnts.curobjs--; tctx->cnts.curbytes -= usize; - + prof_try_log(tsd, ptr, usize, tctx); - + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { prof_tctx_destroy(tsd, tctx); } else { malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } -} - +} + void bt_init(prof_bt_t *bt, void **vec) { cassert(config_prof); - + bt->vec = vec; bt->len = 0; } static void prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { - cassert(config_prof); + cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); - + if (tdata != NULL) { assert(!tdata->enq); tdata->enq = true; } - + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); -} - +} + static void prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { - cassert(config_prof); + cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); - + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - + if (tdata != NULL) { bool idump, gdump; - + assert(tdata->enq); tdata->enq = false; idump = tdata->enq_idump; @@ -581,257 +581,257 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { prof_gdump(tsd_tsdn(tsd)); } } -} - -#ifdef JEMALLOC_PROF_LIBUNWIND -void +} + +#ifdef JEMALLOC_PROF_LIBUNWIND +void prof_backtrace(prof_bt_t *bt) { - unw_context_t uc; - unw_cursor_t cursor; - unsigned i; - int err; - - cassert(config_prof); - assert(bt->len == 0); - assert(bt->vec != NULL); - - unw_getcontext(&uc); - unw_init_local(&cursor, &uc); - - /* - * Iterate over stack frames until there are no more, or until no space - * remains in bt. 
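	 * (unw_step() returns a positive value while frames remain, 0 at the
	 * outermost frame, and a negative error code otherwise, so the
	 * err <= 0 break below covers both normal termination and failure.)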
- */ - for (i = 0; i < PROF_BT_MAX; i++) { - unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]); - bt->len++; - err = unw_step(&cursor); - if (err <= 0) - break; - } -} -#elif (defined(JEMALLOC_PROF_LIBGCC)) -static _Unwind_Reason_Code + unw_context_t uc; + unw_cursor_t cursor; + unsigned i; + int err; + + cassert(config_prof); + assert(bt->len == 0); + assert(bt->vec != NULL); + + unw_getcontext(&uc); + unw_init_local(&cursor, &uc); + + /* + * Iterate over stack frames until there are no more, or until no space + * remains in bt. + */ + for (i = 0; i < PROF_BT_MAX; i++) { + unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]); + bt->len++; + err = unw_step(&cursor); + if (err <= 0) + break; + } +} +#elif (defined(JEMALLOC_PROF_LIBGCC)) +static _Unwind_Reason_Code prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { - cassert(config_prof); - + cassert(config_prof); + return _URC_NO_REASON; -} - -static _Unwind_Reason_Code +} + +static _Unwind_Reason_Code prof_unwind_callback(struct _Unwind_Context *context, void *arg) { - prof_unwind_data_t *data = (prof_unwind_data_t *)arg; + prof_unwind_data_t *data = (prof_unwind_data_t *)arg; void *ip; - - cassert(config_prof); - + + cassert(config_prof); + ip = (void *)_Unwind_GetIP(context); if (ip == NULL) { return _URC_END_OF_STACK; - } + } data->bt->vec[data->bt->len] = ip; data->bt->len++; if (data->bt->len == data->max) { return _URC_END_OF_STACK; } - + return _URC_NO_REASON; -} - -void +} + +void prof_backtrace(prof_bt_t *bt) { prof_unwind_data_t data = {bt, PROF_BT_MAX}; - - cassert(config_prof); - - _Unwind_Backtrace(prof_unwind_callback, &data); -} -#elif (defined(JEMALLOC_PROF_GCC)) -void + + cassert(config_prof); + + _Unwind_Backtrace(prof_unwind_callback, &data); +} +#elif (defined(JEMALLOC_PROF_GCC)) +void prof_backtrace(prof_bt_t *bt) { #define BT_FRAME(i) \ if ((i) < PROF_BT_MAX) { \ - void *p; \ + void *p; \ if (__builtin_frame_address(i) == 0) { \ - return; \ + return; \ } \ - p = __builtin_return_address(i); \ + p = __builtin_return_address(i); \ if (p == NULL) { \ - return; \ - } \ + return; \ + } \ bt->vec[(i)] = p; \ bt->len = (i) + 1; \ } else { \ return; \ } - - cassert(config_prof); - - BT_FRAME(0) - BT_FRAME(1) - BT_FRAME(2) - BT_FRAME(3) - BT_FRAME(4) - BT_FRAME(5) - BT_FRAME(6) - BT_FRAME(7) - BT_FRAME(8) - BT_FRAME(9) - - BT_FRAME(10) - BT_FRAME(11) - BT_FRAME(12) - BT_FRAME(13) - BT_FRAME(14) - BT_FRAME(15) - BT_FRAME(16) - BT_FRAME(17) - BT_FRAME(18) - BT_FRAME(19) - - BT_FRAME(20) - BT_FRAME(21) - BT_FRAME(22) - BT_FRAME(23) - BT_FRAME(24) - BT_FRAME(25) - BT_FRAME(26) - BT_FRAME(27) - BT_FRAME(28) - BT_FRAME(29) - - BT_FRAME(30) - BT_FRAME(31) - BT_FRAME(32) - BT_FRAME(33) - BT_FRAME(34) - BT_FRAME(35) - BT_FRAME(36) - BT_FRAME(37) - BT_FRAME(38) - BT_FRAME(39) - - BT_FRAME(40) - BT_FRAME(41) - BT_FRAME(42) - BT_FRAME(43) - BT_FRAME(44) - BT_FRAME(45) - BT_FRAME(46) - BT_FRAME(47) - BT_FRAME(48) - BT_FRAME(49) - - BT_FRAME(50) - BT_FRAME(51) - BT_FRAME(52) - BT_FRAME(53) - BT_FRAME(54) - BT_FRAME(55) - BT_FRAME(56) - BT_FRAME(57) - BT_FRAME(58) - BT_FRAME(59) - - BT_FRAME(60) - BT_FRAME(61) - BT_FRAME(62) - BT_FRAME(63) - BT_FRAME(64) - BT_FRAME(65) - BT_FRAME(66) - BT_FRAME(67) - BT_FRAME(68) - BT_FRAME(69) - - BT_FRAME(70) - BT_FRAME(71) - BT_FRAME(72) - BT_FRAME(73) - BT_FRAME(74) - BT_FRAME(75) - BT_FRAME(76) - BT_FRAME(77) - BT_FRAME(78) - BT_FRAME(79) - - BT_FRAME(80) - BT_FRAME(81) - BT_FRAME(82) - BT_FRAME(83) - BT_FRAME(84) - BT_FRAME(85) - BT_FRAME(86) - BT_FRAME(87) - 
BT_FRAME(88) - BT_FRAME(89) - - BT_FRAME(90) - BT_FRAME(91) - BT_FRAME(92) - BT_FRAME(93) - BT_FRAME(94) - BT_FRAME(95) - BT_FRAME(96) - BT_FRAME(97) - BT_FRAME(98) - BT_FRAME(99) - - BT_FRAME(100) - BT_FRAME(101) - BT_FRAME(102) - BT_FRAME(103) - BT_FRAME(104) - BT_FRAME(105) - BT_FRAME(106) - BT_FRAME(107) - BT_FRAME(108) - BT_FRAME(109) - - BT_FRAME(110) - BT_FRAME(111) - BT_FRAME(112) - BT_FRAME(113) - BT_FRAME(114) - BT_FRAME(115) - BT_FRAME(116) - BT_FRAME(117) - BT_FRAME(118) - BT_FRAME(119) - - BT_FRAME(120) - BT_FRAME(121) - BT_FRAME(122) - BT_FRAME(123) - BT_FRAME(124) - BT_FRAME(125) - BT_FRAME(126) - BT_FRAME(127) -#undef BT_FRAME -} -#else -void + + cassert(config_prof); + + BT_FRAME(0) + BT_FRAME(1) + BT_FRAME(2) + BT_FRAME(3) + BT_FRAME(4) + BT_FRAME(5) + BT_FRAME(6) + BT_FRAME(7) + BT_FRAME(8) + BT_FRAME(9) + + BT_FRAME(10) + BT_FRAME(11) + BT_FRAME(12) + BT_FRAME(13) + BT_FRAME(14) + BT_FRAME(15) + BT_FRAME(16) + BT_FRAME(17) + BT_FRAME(18) + BT_FRAME(19) + + BT_FRAME(20) + BT_FRAME(21) + BT_FRAME(22) + BT_FRAME(23) + BT_FRAME(24) + BT_FRAME(25) + BT_FRAME(26) + BT_FRAME(27) + BT_FRAME(28) + BT_FRAME(29) + + BT_FRAME(30) + BT_FRAME(31) + BT_FRAME(32) + BT_FRAME(33) + BT_FRAME(34) + BT_FRAME(35) + BT_FRAME(36) + BT_FRAME(37) + BT_FRAME(38) + BT_FRAME(39) + + BT_FRAME(40) + BT_FRAME(41) + BT_FRAME(42) + BT_FRAME(43) + BT_FRAME(44) + BT_FRAME(45) + BT_FRAME(46) + BT_FRAME(47) + BT_FRAME(48) + BT_FRAME(49) + + BT_FRAME(50) + BT_FRAME(51) + BT_FRAME(52) + BT_FRAME(53) + BT_FRAME(54) + BT_FRAME(55) + BT_FRAME(56) + BT_FRAME(57) + BT_FRAME(58) + BT_FRAME(59) + + BT_FRAME(60) + BT_FRAME(61) + BT_FRAME(62) + BT_FRAME(63) + BT_FRAME(64) + BT_FRAME(65) + BT_FRAME(66) + BT_FRAME(67) + BT_FRAME(68) + BT_FRAME(69) + + BT_FRAME(70) + BT_FRAME(71) + BT_FRAME(72) + BT_FRAME(73) + BT_FRAME(74) + BT_FRAME(75) + BT_FRAME(76) + BT_FRAME(77) + BT_FRAME(78) + BT_FRAME(79) + + BT_FRAME(80) + BT_FRAME(81) + BT_FRAME(82) + BT_FRAME(83) + BT_FRAME(84) + BT_FRAME(85) + BT_FRAME(86) + BT_FRAME(87) + BT_FRAME(88) + BT_FRAME(89) + + BT_FRAME(90) + BT_FRAME(91) + BT_FRAME(92) + BT_FRAME(93) + BT_FRAME(94) + BT_FRAME(95) + BT_FRAME(96) + BT_FRAME(97) + BT_FRAME(98) + BT_FRAME(99) + + BT_FRAME(100) + BT_FRAME(101) + BT_FRAME(102) + BT_FRAME(103) + BT_FRAME(104) + BT_FRAME(105) + BT_FRAME(106) + BT_FRAME(107) + BT_FRAME(108) + BT_FRAME(109) + + BT_FRAME(110) + BT_FRAME(111) + BT_FRAME(112) + BT_FRAME(113) + BT_FRAME(114) + BT_FRAME(115) + BT_FRAME(116) + BT_FRAME(117) + BT_FRAME(118) + BT_FRAME(119) + + BT_FRAME(120) + BT_FRAME(121) + BT_FRAME(122) + BT_FRAME(123) + BT_FRAME(124) + BT_FRAME(125) + BT_FRAME(126) + BT_FRAME(127) +#undef BT_FRAME +} +#else +void prof_backtrace(prof_bt_t *bt) { - cassert(config_prof); - not_reached(); -} -#endif - -static malloc_mutex_t * + cassert(config_prof); + not_reached(); +} +#endif + +static malloc_mutex_t * prof_gctx_mutex_choose(void) { unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); - + return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; -} - +} + static malloc_mutex_t * prof_tdata_mutex_choose(uint64_t thr_uid) { return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; } - + static prof_gctx_t * prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { - /* + /* * Create a single allocation that has space for vec of length bt->len. 
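	 * (The offsetof(prof_gctx_t, vec) + bt->len * sizeof(void *) size
	 * computed below is the standard way to allocate a struct with an
	 * inline trailing array, so the gctx header and its backtrace vector
	 * share a single allocation and are freed together.)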
*/ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); @@ -843,9 +843,9 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { } gctx->lock = prof_gctx_mutex_choose(); /* - * Set nlimbo to 1, in order to avoid a race condition with + * Set nlimbo to 1, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). - */ + */ gctx->nlimbo = 1; tctx_tree_new(&gctx->tctxs); /* Duplicate bt. */ @@ -853,43 +853,43 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { gctx->bt.vec = gctx->vec; gctx->bt.len = bt->len; return gctx; -} - -static void +} + +static void prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, prof_tdata_t *tdata) { - cassert(config_prof); - - /* + cassert(config_prof); + + /* * Check that gctx is still unused by any thread cache before destroying * it. prof_lookup() increments gctx->nlimbo in order to avoid a race * condition with this function, as does prof_tctx_destroy() in order to * avoid a race between the main body of prof_tctx_destroy() and entry - * into this function. - */ + * into this function. + */ prof_enter(tsd, tdata_self); malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); assert(gctx->nlimbo != 0); if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { /* Remove gctx from bt2gctx. */ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { - not_reached(); + not_reached(); } prof_leave(tsd, tdata_self); /* Destroy gctx. */ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); - } else { - /* + } else { + /* * Compensate for increment in prof_tctx_destroy() or - * prof_lookup(). - */ + * prof_lookup(). + */ gctx->nlimbo--; malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_leave(tsd, tdata_self); - } -} - + } +} + static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); @@ -920,14 +920,14 @@ prof_gctx_should_destroy(prof_gctx_t *gctx) { return true; } -static void +static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { prof_tdata_t *tdata = tctx->tdata; prof_gctx_t *gctx = tctx->gctx; bool destroy_tdata, destroy_tctx, destroy_gctx; - + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); - + assert(tctx->cnts.curobjs == 0); assert(tctx->cnts.curbytes == 0); assert(!opt_prof_accum); @@ -965,11 +965,11 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { } break; case prof_tctx_state_dumping: - /* + /* * A dumping thread needs tctx to remain valid until dumping * has finished. Change state such that the dumping thread will * complete destruction during a late dump iteration phase. - */ + */ tctx->state = prof_tctx_state_purgatory; destroy_tctx = false; destroy_gctx = false; @@ -994,29 +994,29 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { if (destroy_tctx) { idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); } -} - -static bool +} + +static bool prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { - union { + union { prof_gctx_t *p; - void *v; + void *v; } gctx, tgctx; - union { - prof_bt_t *p; - void *v; - } btkey; + union { + prof_bt_t *p; + void *v; + } btkey; bool new_gctx; - + prof_enter(tsd, tdata); if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { - /* bt has never been seen before. Insert it. */ + /* bt has never been seen before. Insert it. 
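		 * (Optimistic double-checked insert: bt2gctx_mtx is dropped
		 * while the candidate gctx is created, then retaken and the
		 * hash re-searched; if another thread inserted the same
		 * backtrace in the window, the speculative tgctx is destroyed
		 * further down.)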
*/ prof_leave(tsd, tdata); tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); if (tgctx.v == NULL) { return true; - } + } prof_enter(tsd, tdata); if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { gctx.p = tgctx.p; @@ -1031,17 +1031,17 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, new_gctx = true; } else { new_gctx = false; - } - } else { + } + } else { tgctx.v = NULL; new_gctx = false; } if (!new_gctx) { - /* - * Increment nlimbo, in order to avoid a race condition with + /* + * Increment nlimbo, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). - */ + */ malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); gctx.p->nlimbo++; malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); @@ -1052,31 +1052,31 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, true); } - } + } prof_leave(tsd, tdata); - - *p_btkey = btkey.v; + + *p_btkey = btkey.v; *p_gctx = gctx.p; *p_new_gctx = new_gctx; return false; -} - +} + prof_tctx_t * prof_lookup(tsd_t *tsd, prof_bt_t *bt) { - union { + union { prof_tctx_t *p; - void *v; - } ret; + void *v; + } ret; prof_tdata_t *tdata; bool not_found; - - cassert(config_prof); - + + cassert(config_prof); + tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { return NULL; } - + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); if (!not_found) { /* Note double negative! */ @@ -1084,19 +1084,19 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) { } malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (not_found) { - void *btkey; + void *btkey; prof_gctx_t *gctx; bool new_gctx, error; - - /* - * This thread's cache lacks bt. Look for it in the global - * cache. - */ + + /* + * This thread's cache lacks bt. Look for it in the global + * cache. + */ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, &new_gctx)) { return NULL; } - + /* Link a prof_tctx_t into gctx for this thread. 
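
prof_lookup() above is a classic two-level cache: an unlocked per-thread table (bt2tctx) backed by the locked global table (bt2gctx), with hits memoized locally. A toy standalone version of that shape, using linear arrays in place of the cuckoo hashes; all names here are hypothetical:

#include <pthread.h>
#include <stddef.h>
#include <string.h>

#define NSLOTS 64 /* toy capacity, not a jemalloc constant */

typedef struct { const char *key; void *val; } slot_t;

static slot_t global_tab[NSLOTS];
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static _Thread_local slot_t local_tab[NSLOTS];

static void *
tab_search(slot_t *tab, const char *key) {
    for (size_t i = 0; i < NSLOTS && tab[i].key != NULL; i++) {
        if (strcmp(tab[i].key, key) == 0) {
            return tab[i].val;
        }
    }
    return NULL;
}

static void
tab_insert(slot_t *tab, const char *key, void *val) {
    for (size_t i = 0; i < NSLOTS; i++) {
        if (tab[i].key == NULL) {
            tab[i].key = key;
            tab[i].val = val;
            return;
        }
    }
}

void *
two_level_lookup(const char *key) {
    void *v = tab_search(local_tab, key); /* no lock: thread-private */
    if (v != NULL) {
        return v;
    }
    pthread_mutex_lock(&global_lock);
    v = tab_search(global_tab, key);
    pthread_mutex_unlock(&global_lock);
    if (v != NULL) {
        tab_insert(local_tab, key, v); /* memoize locally */
    }
    return v;
}
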
*/ ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, @@ -1104,13 +1104,13 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) { if (ret.p == NULL) { if (new_gctx) { prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } + } return NULL; - } + } ret.p->tdata = tdata; ret.p->thr_uid = tdata->thr_uid; ret.p->thr_discrim = tdata->thr_discrim; - memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); + memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); ret.p->gctx = gctx; ret.p->tctx_uid = tdata->tctx_uid_next++; ret.p->prepared = true; @@ -1124,17 +1124,17 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) { } idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); return NULL; - } + } malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); ret.p->state = prof_tctx_state_nominal; tctx_tree_insert(&gctx->tctxs, ret.p); gctx->nlimbo--; malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - } - + } + return ret.p; -} - +} + /* * The bodies of this function and prof_leakcheck() are compiled out unless heap * profiling is enabled, so that it is possible to compile jemalloc with @@ -1191,7 +1191,7 @@ prof_sample_threshold_update(prof_tdata_t *tdata) { #endif } -#ifdef JEMALLOC_JET +#ifdef JEMALLOC_JET static prof_tdata_t * prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { @@ -1202,7 +1202,7 @@ prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, return NULL; } -size_t +size_t prof_tdata_count(void) { size_t tdata_count = 0; tsdn_t *tsdn; @@ -1218,127 +1218,127 @@ prof_tdata_count(void) { size_t prof_bt_count(void) { - size_t bt_count; + size_t bt_count; tsd_t *tsd; prof_tdata_t *tdata; - + tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { return 0; } - + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); bt_count = ckh_count(&bt2gctx); malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - + return bt_count; -} -#endif - -static int +} +#endif + +static int prof_dump_open_impl(bool propagate_err, const char *filename) { - int fd; - - fd = creat(filename, 0644); + int fd; + + fd = creat(filename, 0644); if (fd == -1 && !propagate_err) { - malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n", - filename); + malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n", + filename); if (opt_abort) { - abort(); + abort(); } - } - + } + return fd; -} +} prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl; - -static bool + +static bool prof_dump_flush(bool propagate_err) { - bool ret = false; - ssize_t err; - - cassert(config_prof); - + bool ret = false; + ssize_t err; + + cassert(config_prof); + err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); - if (err == -1) { + if (err == -1) { if (!propagate_err) { - malloc_write("<jemalloc>: write() failed during heap " - "profile flush\n"); + malloc_write("<jemalloc>: write() failed during heap " + "profile flush\n"); if (opt_abort) { - abort(); + abort(); } - } - ret = true; - } - prof_dump_buf_end = 0; - + } + ret = true; + } + prof_dump_buf_end = 0; + return ret; -} - -static bool +} + +static bool prof_dump_close(bool propagate_err) { - bool ret; - - assert(prof_dump_fd != -1); - ret = prof_dump_flush(propagate_err); - close(prof_dump_fd); - prof_dump_fd = -1; - + bool ret; + + assert(prof_dump_fd != -1); + ret = prof_dump_flush(propagate_err); + close(prof_dump_fd); + prof_dump_fd = -1; + return ret; -} - -static bool +} + +static bool prof_dump_write(bool propagate_err, const char *s) { size_t i, slen, n; - - cassert(config_prof); - - i = 0; - 
slen = strlen(s); - while (i < slen) { - /* Flush the buffer if it is full. */ + + cassert(config_prof); + + i = 0; + slen = strlen(s); + while (i < slen) { + /* Flush the buffer if it is full. */ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { if (prof_dump_flush(propagate_err) && propagate_err) { return true; } } - + if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) { - /* Finish writing. */ - n = slen - i; - } else { - /* Write as much of s as will fit. */ - n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; - } - memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); - prof_dump_buf_end += n; - i += n; - } + /* Finish writing. */ + n = slen - i; + } else { + /* Write as much of s as will fit. */ + n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; + } + memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); + prof_dump_buf_end += n; + i += n; + } assert(i == slen); - + return false; -} - +} + JEMALLOC_FORMAT_PRINTF(2, 3) -static bool +static bool prof_dump_printf(bool propagate_err, const char *format, ...) { - bool ret; - va_list ap; - char buf[PROF_PRINTF_BUFSIZE]; - - va_start(ap, format); - malloc_vsnprintf(buf, sizeof(buf), format, ap); - va_end(ap); - ret = prof_dump_write(propagate_err, buf); - + bool ret; + va_list ap; + char buf[PROF_PRINTF_BUFSIZE]; + + va_start(ap, format); + malloc_vsnprintf(buf, sizeof(buf), format, ap); + va_end(ap); + ret = prof_dump_write(propagate_err, buf); + return ret; -} - -static void +} + +static void prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - + malloc_mutex_lock(tsdn, tctx->gctx->lock); switch (tctx->state) { @@ -1460,33 +1460,33 @@ label_return: static void prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { - cassert(config_prof); - + cassert(config_prof); + malloc_mutex_lock(tsdn, gctx->lock); - - /* + + /* * Increment nlimbo so that gctx won't go away before dump. * Additionally, link gctx into the dump list so that it is included in - * prof_dump()'s second pass. - */ + * prof_dump()'s second pass. 
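
The prof_dump_write() loop above is plain buffered output: flush when the buffer is full, otherwise copy as much of s as fits and advance. The same logic in a freestanding sketch, with BUFSIZE standing in for PROF_DUMP_BUFSIZE:

#include <string.h>
#include <unistd.h>

#define BUFSIZE 4096 /* stand-in for PROF_DUMP_BUFSIZE */

static char buf[BUFSIZE];
static size_t buf_end;
static int out_fd = 1; /* stdout, for the sketch */

static void
flush_buf(void) {
    (void)write(out_fd, buf, buf_end);
    buf_end = 0;
}

static void
dump_write(const char *s) {
    size_t i = 0, slen = strlen(s);
    while (i < slen) {
        if (buf_end == BUFSIZE) {
            flush_buf(); /* make room before copying more */
        }
        size_t n = slen - i;           /* finish in one copy... */
        if (n > BUFSIZE - buf_end) {
            n = BUFSIZE - buf_end;     /* ...or copy what fits */
        }
        memcpy(&buf[buf_end], &s[i], n);
        buf_end += n;
        i += n;
    }
}

int
main(void) {
    dump_write("heap_v2/524288\n");
    flush_buf(); /* prof_dump_flush() analogue at close time */
    return 0;
}
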
+ */ gctx->nlimbo++; gctx_tree_insert(gctxs, gctx); - + memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); - + malloc_mutex_unlock(tsdn, gctx->lock); } - + struct prof_gctx_merge_iter_arg_s { tsdn_t *tsdn; size_t leak_ngctx; }; - + static prof_gctx_t * prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { struct prof_gctx_merge_iter_arg_s *arg = (struct prof_gctx_merge_iter_arg_s *)opaque; - + malloc_mutex_lock(arg->tsdn, gctx->lock); tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, (void *)arg->tsdn); @@ -1494,7 +1494,7 @@ prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { arg->leak_ngctx++; } malloc_mutex_unlock(arg->tsdn, gctx->lock); - + return NULL; } @@ -1532,7 +1532,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { next = NULL; } } while (next != NULL); - } + } gctx->nlimbo--; if (prof_gctx_should_destroy(gctx)) { gctx->nlimbo++; @@ -1541,20 +1541,20 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { } else { malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } - } + } } - + struct prof_tdata_merge_iter_arg_s { tsdn_t *tsdn; prof_cnt_t cnt_all; }; - + static prof_tdata_t * prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *opaque) { struct prof_tdata_merge_iter_arg_s *arg = (struct prof_tdata_merge_iter_arg_s *)opaque; - + malloc_mutex_lock(arg->tsdn, tdata->lock); if (!tdata->expired) { size_t tabind; @@ -1562,33 +1562,33 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, prof_tctx_t *p; void *v; } tctx; - + tdata->dumping = true; memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, &tctx.v);) { prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); } - + arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; if (opt_prof_accum) { arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; } - } else { + } else { tdata->dumping = false; - } + } malloc_mutex_unlock(arg->tsdn, tdata->lock); - + return NULL; -} - +} + static prof_tdata_t * prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { bool propagate_err = *(bool *)arg; - + if (!tdata->dumping) { return NULL; } @@ -1603,13 +1603,13 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, return tdata; } return NULL; -} - +} + static bool prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) { bool ret; - + if (prof_dump_printf(propagate_err, "heap_v2/%"FMTu64"\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", @@ -1623,19 +1623,19 @@ prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, (void *)&propagate_err) != NULL); malloc_mutex_unlock(tsdn, &tdatas_mtx); return ret; -} +} prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; - -static bool + +static bool prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { - bool ret; - unsigned i; + bool ret; + unsigned i; struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; - - cassert(config_prof); + + cassert(config_prof); malloc_mutex_assert_owner(tsdn, gctx->lock); - + /* Avoid dumping such gctx's that have no useful data. 
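
prof_tdata_merge_iter() above is the summing pass of a dump: every non-expired thread snapshot is folded into one global counter set, with the cumulative fields gated by opt_prof_accum. Reduced to a list walk for illustration (jemalloc actually iterates a red-black tree under tdatas_mtx):

#include <stddef.h>
#include <stdint.h>

typedef struct {
    uint64_t curobjs, curbytes, accumobjs, accumbytes;
} cnt_t;

typedef struct tdata_s {
    struct tdata_s *next;
    int expired;
    cnt_t cnt_summed;
} tdata_t;

cnt_t
merge_all(tdata_t *head, int accum_enabled) {
    cnt_t all = {0, 0, 0, 0};
    for (tdata_t *t = head; t != NULL; t = t->next) {
        if (t->expired) {
            continue; /* expired threads no longer contribute */
        }
        all.curobjs += t->cnt_summed.curobjs;
        all.curbytes += t->cnt_summed.curbytes;
        if (accum_enabled) { /* opt_prof_accum gates cumulative sums */
            all.accumobjs += t->cnt_summed.accumobjs;
            all.accumbytes += t->cnt_summed.accumbytes;
        }
    }
    return all;
}
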
*/ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { @@ -1643,31 +1643,31 @@ prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, assert(gctx->cnt_summed.curbytes == 0); assert(gctx->cnt_summed.accumobjs == 0); assert(gctx->cnt_summed.accumbytes == 0); - ret = false; - goto label_return; - } - + ret = false; + goto label_return; + } + if (prof_dump_printf(propagate_err, "@")) { - ret = true; - goto label_return; - } - for (i = 0; i < bt->len; i++) { + ret = true; + goto label_return; + } + for (i = 0; i < bt->len; i++) { if (prof_dump_printf(propagate_err, " %#"FMTxPTR, - (uintptr_t)bt->vec[i])) { - ret = true; - goto label_return; - } - } - + (uintptr_t)bt->vec[i])) { + ret = true; + goto label_return; + } + } + if (prof_dump_printf(propagate_err, "\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { - ret = true; - goto label_return; - } - + ret = true; + goto label_return; + } + prof_tctx_dump_iter_arg.tsdn = tsdn; prof_tctx_dump_iter_arg.propagate_err = propagate_err; if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, @@ -1676,11 +1676,11 @@ prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, goto label_return; } - ret = false; -label_return: + ret = false; +label_return: return ret; -} - +} + #ifndef _WIN32 JEMALLOC_FORMAT_PRINTF(1, 2) static int @@ -1715,17 +1715,17 @@ prof_getpid(void) { #endif } -static bool +static bool prof_dump_maps(bool propagate_err) { - bool ret; - int mfd; - - cassert(config_prof); -#ifdef __FreeBSD__ + bool ret; + int mfd; + + cassert(config_prof); +#ifdef __FreeBSD__ mfd = prof_open_maps("/proc/curproc/map"); #elif defined(_WIN32) mfd = -1; // Not implemented -#else +#else { int pid = prof_getpid(); @@ -1734,48 +1734,48 @@ prof_dump_maps(bool propagate_err) { mfd = prof_open_maps("/proc/%d/maps", pid); } } -#endif - if (mfd != -1) { - ssize_t nread; - - if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && - propagate_err) { - ret = true; - goto label_return; - } - nread = 0; - do { - prof_dump_buf_end += nread; - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { - /* Make space in prof_dump_buf before read(). */ - if (prof_dump_flush(propagate_err) && - propagate_err) { - ret = true; - goto label_return; - } - } +#endif + if (mfd != -1) { + ssize_t nread; + + if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && + propagate_err) { + ret = true; + goto label_return; + } + nread = 0; + do { + prof_dump_buf_end += nread; + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + /* Make space in prof_dump_buf before read(). */ + if (prof_dump_flush(propagate_err) && + propagate_err) { + ret = true; + goto label_return; + } + } nread = malloc_read_fd(mfd, &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE - prof_dump_buf_end); - } while (nread > 0); - } else { - ret = true; - goto label_return; - } - - ret = false; -label_return: + } while (nread > 0); + } else { + ret = true; + goto label_return; + } + + ret = false; +label_return: if (mfd != -1) { - close(mfd); + close(mfd); } return ret; -} - +} + /* * See prof_sample_threshold_update() comment for why the body of this function * is conditionally compiled. 
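
For reference, the per-context records prof_dump_gctx() emits look like the output of this sketch: one "@" line of frame addresses, then the summed counters. The frame addresses and counts below are fabricated:

#include <inttypes.h>
#include <stdio.h>

int
main(void) {
    uintptr_t vec[] = {0x40112a, 0x4022b8, 0x7f33a1}; /* fake frames */
    size_t len = sizeof(vec) / sizeof(vec[0]);

    printf("@");
    for (size_t i = 0; i < len; i++) {
        printf(" %#" PRIxPTR, vec[i]);
    }
    /* "curobjs: curbytes [accumobjs: accumbytes]" */
    printf("\n  t*: %" PRIu64 ": %" PRIu64 " [%" PRIu64 ": %" PRIu64 "]\n",
        (uint64_t)2, (uint64_t)8192, (uint64_t)0, (uint64_t)0);
    return 0;
}
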
*/ -static void +static void prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, const char *filename) { #ifdef JEMALLOC_PROF @@ -1794,18 +1794,18 @@ prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, * scale_factor); uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * scale_factor); - + malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64 " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : ""); - malloc_printf( + malloc_printf( "<jemalloc>: Run jeprof on \"%s\" for leak detail\n", - filename); - } + filename); + } #endif -} - +} + struct prof_gctx_dump_iter_arg_s { tsdn_t *tsdn; bool propagate_err; @@ -1836,14 +1836,14 @@ prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, prof_gctx_tree_t *gctxs) { - size_t tabind; - union { + size_t tabind; + union { prof_gctx_t *p; - void *v; + void *v; } gctx; - + prof_enter(tsd, tdata); - + /* * Put gctx's in limbo and clear their counters in preparation for * summing. @@ -1852,7 +1852,7 @@ prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); } - + /* * Iterate over tdatas, and for the non-expired ones snapshot their tctx * stats and merge them into the associated gctx's. @@ -1863,13 +1863,13 @@ prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)prof_tdata_merge_iter_arg); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - + /* Merge tctx stats into gctx's. */ prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd); prof_gctx_merge_iter_arg->leak_ngctx = 0; gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, (void *)prof_gctx_merge_iter_arg); - + prof_leave(tsd, tdata); } @@ -1880,40 +1880,40 @@ prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, prof_gctx_tree_t *gctxs) { - /* Create dump file. */ + /* Create dump file. */ if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { return true; } - - /* Dump profile header. */ + + /* Dump profile header. */ if (prof_dump_header(tsd_tsdn(tsd), propagate_err, &prof_tdata_merge_iter_arg->cnt_all)) { - goto label_write_error; + goto label_write_error; } - + /* Dump per gctx profile stats. */ prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd); prof_gctx_dump_iter_arg->propagate_err = propagate_err; if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter, (void *)prof_gctx_dump_iter_arg) != NULL) { goto label_write_error; - } - - /* Dump /proc/<pid>/maps if possible. */ + } + + /* Dump /proc/<pid>/maps if possible. 
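
The leak summary above multiplies the sampled totals by a scale_factor to undo sampling bias. The hunk elides the factor's derivation, so treat the reconstruction below as an assumption: it follows the upstream 1/(1 - e^-ratio) estimator, with illustrative inputs (lg_prof_sample of 19, roughly a 512 KiB sampling period, is the usual default):

#include <inttypes.h>
#include <math.h>
#include <stdio.h>

int
main(void) {
    unsigned lg_prof_sample = 19;     /* assumed default sample rate */
    uint64_t sampled_bytes = 1048576; /* totals seen in samples */
    uint64_t sampled_objs = 4;

    double sample_period = (double)((uint64_t)1 << lg_prof_sample);
    double ratio = (((double)sampled_bytes) / (double)sampled_objs) /
        sample_period;
    double scale_factor = 1.0 / (1.0 - exp(-ratio));

    printf("~%" PRIu64 " bytes in ~%" PRIu64 " objects\n",
        (uint64_t)round((double)sampled_bytes * scale_factor),
        (uint64_t)round((double)sampled_objs * scale_factor));
    return 0;
}
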
*/ if (prof_dump_maps(propagate_err)) { - goto label_write_error; + goto label_write_error; } - + if (prof_dump_close(propagate_err)) { return true; } - + return false; -label_write_error: - prof_dump_close(propagate_err); +label_write_error: + prof_dump_close(propagate_err); return true; -} - +} + static bool prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) { @@ -2002,39 +2002,39 @@ prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, #define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) #define VSEQ_INVALID UINT64_C(0xffffffffffffffff) -static void +static void prof_dump_filename(char *filename, char v, uint64_t vseq) { - cassert(config_prof); - - if (vseq != VSEQ_INVALID) { - /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + cassert(config_prof); + + if (vseq != VSEQ_INVALID) { + /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c%"FMTu64".heap", opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); - } else { - /* "<prefix>.<pid>.<seq>.<v>.heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + } else { + /* "<prefix>.<pid>.<seq>.<v>.heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c.heap", opt_prof_prefix, prof_getpid(), prof_dump_seq, v); - } - prof_dump_seq++; -} - -static void + } + prof_dump_seq++; +} + +static void prof_fdump(void) { tsd_t *tsd; - char filename[DUMP_FILENAME_BUFSIZE]; - - cassert(config_prof); + char filename[DUMP_FILENAME_BUFSIZE]; + + cassert(config_prof); assert(opt_prof_final); assert(opt_prof_prefix[0] != '\0'); - + if (!prof_booted) { - return; + return; } tsd = tsd_fetch(); assert(tsd_reentrancy_level_get(tsd) == 0); - + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'f', VSEQ_INVALID); malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); @@ -2049,133 +2049,133 @@ prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) { if (malloc_mutex_init(&prof_accum->mtx, "prof_accum", WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) { return true; - } + } prof_accum->accumbytes = 0; #else atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED); #endif return false; -} - -void +} + +void prof_idump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; - - cassert(config_prof); - + + cassert(config_prof); + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { - return; + return; } tsd = tsdn_tsd(tsdn); if (tsd_reentrancy_level_get(tsd) > 0) { - return; + return; } tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { - return; - } + return; + } if (tdata->enq) { tdata->enq_idump = true; return; } - - if (opt_prof_prefix[0] != '\0') { + + if (opt_prof_prefix[0] != '\0') { char filename[PATH_MAX + 1]; malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - prof_dump_filename(filename, 'i', prof_dump_iseq); - prof_dump_iseq++; + prof_dump_filename(filename, 'i', prof_dump_iseq); + prof_dump_iseq++; malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); - } -} - -bool + } +} + +bool prof_mdump(tsd_t *tsd, const char *filename) { - cassert(config_prof); + cassert(config_prof); assert(tsd_reentrancy_level_get(tsd) == 0); - + if (!opt_prof || !prof_booted) { return true; } char filename_buf[DUMP_FILENAME_BUFSIZE]; - if (filename == NULL) { - /* No filename specified, so automatically generate one. */ + if (filename == NULL) { + /* No filename specified, so automatically generate one. 
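
prof_dump_filename() above yields names of the form <prefix>.<pid>.<seq>.<type><vseq>.heap, where the type letter records the trigger ('f' final, 'i' interval, 'm' manual, 'u' gdump). A quick demonstration, assuming the default jeprof prefix and made-up pid/sequence numbers:

#include <stdio.h>

int
main(void) {
    char name[256];
    /* 'i' = interval dump #3, 12th dump overall (illustrative) */
    snprintf(name, sizeof(name), "%s.%d.%u.%c%u.heap",
        "jeprof", 12345, 12u, 'i', 3u);
    puts(name); /* jeprof.12345.12.i3.heap */

    /* 'f' = final dump at exit: no per-type sequence number */
    snprintf(name, sizeof(name), "%s.%d.%u.%c.heap",
        "jeprof", 12345, 13u, 'f');
    puts(name); /* jeprof.12345.13.f.heap */
    return 0;
}
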
*/ if (opt_prof_prefix[0] == '\0') { return true; } malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - prof_dump_filename(filename_buf, 'm', prof_dump_mseq); - prof_dump_mseq++; + prof_dump_filename(filename_buf, 'm', prof_dump_mseq); + prof_dump_mseq++; malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - filename = filename_buf; - } + filename = filename_buf; + } return prof_dump(tsd, true, filename, false); -} - -void +} + +void prof_gdump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; - - cassert(config_prof); - + + cassert(config_prof); + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { - return; + return; } tsd = tsdn_tsd(tsdn); if (tsd_reentrancy_level_get(tsd) > 0) { - return; + return; } tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { - return; - } + return; + } if (tdata->enq) { tdata->enq_gdump = true; return; } - - if (opt_prof_prefix[0] != '\0') { + + if (opt_prof_prefix[0] != '\0') { char filename[DUMP_FILENAME_BUFSIZE]; malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); - prof_dump_filename(filename, 'u', prof_dump_useq); - prof_dump_useq++; + prof_dump_filename(filename, 'u', prof_dump_useq); + prof_dump_useq++; malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); - } -} - -static void + } +} + +static void prof_bt_hash(const void *key, size_t r_hash[2]) { - prof_bt_t *bt = (prof_bt_t *)key; - - cassert(config_prof); - - hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); -} - -static bool + prof_bt_t *bt = (prof_bt_t *)key; + + cassert(config_prof); + + hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); +} + +static bool prof_bt_keycomp(const void *k1, const void *k2) { - const prof_bt_t *bt1 = (prof_bt_t *)k1; - const prof_bt_t *bt2 = (prof_bt_t *)k2; - - cassert(config_prof); - + const prof_bt_t *bt1 = (prof_bt_t *)k1; + const prof_bt_t *bt2 = (prof_bt_t *)k2; + + cassert(config_prof); + if (bt1->len != bt2->len) { return false; } - return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); -} - + return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); +} + static void prof_bt_node_hash(const void *key, size_t r_hash[2]) { const prof_bt_node_t *bt_node = (prof_bt_node_t *)key; prof_bt_hash((void *)(&bt_node->bt), r_hash); } - + static bool prof_bt_node_keycomp(const void *k1, const void *k2) { const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1; @@ -2214,16 +2214,16 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, char *thread_name, bool active) { prof_tdata_t *tdata; - cassert(config_prof); - - /* Initialize an empty cache for this thread. */ + cassert(config_prof); + + /* Initialize an empty cache for this thread. 
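
prof_bt_hash() and prof_bt_keycomp() above treat a backtrace as a raw byte key: equality is length plus memcmp over the PC vector, and the hash runs over the same bytes. A standalone equivalent; FNV-1a here is only a stand-in for jemalloc's hash():

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct {
    void **vec; /* program counters, innermost first */
    size_t len;
} bt_t;

uint64_t
bt_hash(const bt_t *bt) {
    const unsigned char *p = (const unsigned char *)bt->vec;
    uint64_t h = 0xcbf29ce484222325ULL;
    for (size_t i = 0; i < bt->len * sizeof(void *); i++) {
        h = (h ^ p[i]) * 0x100000001b3ULL; /* FNV-1a step */
    }
    return h;
}

bool
bt_keycomp(const bt_t *a, const bt_t *b) {
    if (a->len != b->len) {
        return false;
    }
    return memcmp(a->vec, b->vec, a->len * sizeof(void *)) == 0;
}
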
*/ tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (tdata == NULL) { return NULL; } - + tdata->lock = prof_tdata_mutex_choose(thr_uid); tdata->thr_uid = thr_uid; tdata->thr_discrim = thr_discrim; @@ -2236,8 +2236,8 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, prof_bt_keycomp)) { idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); return NULL; - } - + } + tdata->prng_state = (uint64_t)(uintptr_t)tdata; prof_sample_threshold_update(tdata); @@ -2265,26 +2265,26 @@ static bool prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { if (tdata->attached && !even_if_attached) { return false; - } + } if (ckh_count(&tdata->bt2tctx) != 0) { return false; } return true; } - + static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_assert_owner(tsdn, tdata->lock); - + return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); } - + static void prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); - + tdata_tree_remove(&tdatas, tdata); assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); @@ -2295,40 +2295,40 @@ prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, } ckh_delete(tsd, &tdata->bt2tctx); idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); -} - +} + static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); prof_tdata_destroy_locked(tsd, tdata, even_if_attached); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); } - + static void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { bool destroy_tdata; - + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); if (tdata->attached) { destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, true); - /* + /* * Only detach if !destroy_tdata, because detaching would allow * another thread to win the race to destroy tdata. - */ + */ if (!destroy_tdata) { tdata->attached = false; - } + } tsd_prof_tdata_set(tsd, NULL); } else { destroy_tdata = false; - } + } malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (destroy_tdata) { prof_tdata_destroy(tsd, tdata, true); } -} - +} + prof_tdata_t * prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { uint64_t thr_uid = tdata->thr_uid; @@ -2367,10 +2367,10 @@ prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL); } -void +void prof_reset(tsd_t *tsd, size_t lg_sample) { prof_tdata_t *next; - + assert(lg_sample < (sizeof(uint64_t) << 3)); malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); @@ -2930,57 +2930,57 @@ prof_gdump_set(tsdn_t *tsdn, bool gdump) { void prof_boot0(void) { - cassert(config_prof); - - memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, - sizeof(PROF_PREFIX_DEFAULT)); -} - -void + cassert(config_prof); + + memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, + sizeof(PROF_PREFIX_DEFAULT)); +} + +void prof_boot1(void) { - cassert(config_prof); - - /* + cassert(config_prof); + + /* * opt_prof must be in its final state before any arenas are * initialized, so this function must be executed early. - */ - + */ + if (opt_prof_leak && !opt_prof) { - /* - * Enable opt_prof, but in such a way that profiles are never - * automatically dumped. 
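
The detach rule above deserves emphasis: prof_tdata_detach() clears `attached` only when it is not the destroyer, so exactly one thread can win the destruction race. Distilled into a sketch with hypothetical names:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
    pthread_mutex_t lock;
    bool attached;
    size_t live_entries;
} tdata_t;

void
tdata_detach(tdata_t *t) {
    bool destroy = false;
    pthread_mutex_lock(&t->lock);
    if (t->attached) {
        destroy = (t->live_entries == 0);
        /*
         * Only flip `attached` when not destroying; the destroyer
         * leaves it set, so a second thread cannot also elect to
         * destroy the same record.
         */
        if (!destroy) {
            t->attached = false;
        }
    }
    pthread_mutex_unlock(&t->lock);
    if (destroy) {
        free(t);
    }
}
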
- */ - opt_prof = true; - opt_prof_gdump = false; - } else if (opt_prof) { - if (opt_lg_prof_interval >= 0) { - prof_interval = (((uint64_t)1U) << - opt_lg_prof_interval); - } - } -} - -bool + /* + * Enable opt_prof, but in such a way that profiles are never + * automatically dumped. + */ + opt_prof = true; + opt_prof_gdump = false; + } else if (opt_prof) { + if (opt_lg_prof_interval >= 0) { + prof_interval = (((uint64_t)1U) << + opt_lg_prof_interval); + } + } +} + +bool prof_boot2(tsd_t *tsd) { - cassert(config_prof); - - if (opt_prof) { - unsigned i; - + cassert(config_prof); + + if (opt_prof) { + unsigned i; + lg_prof_sample = opt_lg_prof_sample; prof_active = opt_prof_active; if (malloc_mutex_init(&prof_active_mtx, "prof_active", WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) { return true; - } - + } + prof_gdump_val = opt_prof_gdump; if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) { return true; } - + prof_thread_active_init = opt_prof_thread_active_init; if (malloc_mutex_init(&prof_thread_active_init_mtx, "prof_thread_active_init", @@ -3021,12 +3021,12 @@ prof_boot2(tsd_t *tsd) { if (opt_prof_final && opt_prof_prefix[0] != '\0' && atexit(prof_fdump) != 0) { - malloc_write("<jemalloc>: Error in atexit()\n"); + malloc_write("<jemalloc>: Error in atexit()\n"); if (opt_abort) { - abort(); + abort(); } - } - + } + if (opt_prof_log) { prof_log_start(tsd_tsdn(tsd), NULL); } @@ -3062,14 +3062,14 @@ prof_boot2(tsd_t *tsd) { if (gctx_locks == NULL) { return true; } - for (i = 0; i < PROF_NCTX_LOCKS; i++) { + for (i = 0; i < PROF_NCTX_LOCKS; i++) { if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", WITNESS_RANK_PROF_GCTX, malloc_mutex_rank_exclusive)) { return true; } - } - + } + tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), CACHELINE); @@ -3083,24 +3083,24 @@ prof_boot2(tsd_t *tsd) { return true; } } -#ifdef JEMALLOC_PROF_LIBGCC +#ifdef JEMALLOC_PROF_LIBGCC /* * Cause the backtracing machinery to allocate its internal * state before enabling profiling. 
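
prof_boot1() above turns opt_lg_prof_interval into a byte count: automatic 'i' dumps fire about once per 2^lg bytes allocated. Worked example:

#include <inttypes.h>
#include <stdio.h>

int
main(void) {
    int opt_lg_prof_interval = 30; /* illustrative setting */
    if (opt_lg_prof_interval >= 0) {
        uint64_t prof_interval =
            ((uint64_t)1U) << opt_lg_prof_interval;
        /* 2^30 = 1073741824: dump roughly once per GiB allocated */
        printf("prof_interval = %" PRIu64 " bytes\n", prof_interval);
    }
    return 0;
}
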
*/ _Unwind_Backtrace(prof_unwind_init_callback, NULL); -#endif +#endif } - prof_booted = true; - + prof_booted = true; + return false; -} - -void +} + +void prof_prefork0(tsdn_t *tsdn) { if (config_prof && opt_prof) { - unsigned i; - + unsigned i; + malloc_mutex_prefork(tsdn, &prof_dump_mtx); malloc_mutex_prefork(tsdn, &bt2gctx_mtx); malloc_mutex_prefork(tsdn, &tdatas_mtx); @@ -3110,10 +3110,10 @@ prof_prefork0(tsdn_t *tsdn) { for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_prefork(tsdn, &gctx_locks[i]); } - } -} - -void + } +} + +void prof_prefork1(tsdn_t *tsdn) { if (config_prof && opt_prof) { malloc_mutex_prefork(tsdn, &prof_active_mtx); @@ -3123,12 +3123,12 @@ prof_prefork1(tsdn_t *tsdn) { malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); } } - + void prof_postfork_parent(tsdn_t *tsdn) { if (config_prof && opt_prof) { - unsigned i; - + unsigned i; + malloc_mutex_postfork_parent(tsdn, &prof_thread_active_init_mtx); malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); @@ -3144,14 +3144,14 @@ prof_postfork_parent(tsdn_t *tsdn) { malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); - } -} - -void + } +} + +void prof_postfork_child(tsdn_t *tsdn) { if (config_prof && opt_prof) { - unsigned i; - + unsigned i; + malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); @@ -3166,7 +3166,7 @@ prof_postfork_child(tsdn_t *tsdn) { malloc_mutex_postfork_child(tsdn, &tdatas_mtx); malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); - } -} - -/******************************************************************************/ + } +} + +/******************************************************************************/ diff --git a/contrib/libs/jemalloc/src/rtree.c b/contrib/libs/jemalloc/src/rtree.c index 4ae41fe2fe..960e68cdc5 100644 --- a/contrib/libs/jemalloc/src/rtree.c +++ b/contrib/libs/jemalloc/src/rtree.c @@ -1,10 +1,10 @@ #define JEMALLOC_RTREE_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" - + #include "jemalloc/internal/assert.h" #include "jemalloc/internal/mutex.h" - + /* * Only the most significant bits of keys passed to rtree_{read,write}() are * used. 
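
The prefork/postfork trio above follows the standard pthread_atfork discipline: take every profiling mutex in one fixed order before fork() and release afterwards in both parent and child. A simplified sketch (the real child path reinitializes locks rather than unlocking them; this version just unlocks):

#include <pthread.h>

#define NLOCKS 3 /* toy count */

static pthread_mutex_t locks[NLOCKS] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER
};

static void
prefork(void) {
    for (int i = 0; i < NLOCKS; i++) {
        pthread_mutex_lock(&locks[i]); /* one fixed order: no deadlock */
    }
}

static void
postfork(void) {
    for (int i = NLOCKS - 1; i >= 0; i--) {
        pthread_mutex_unlock(&locks[i]); /* reverse order on release */
    }
}

int
main(void) {
    return pthread_atfork(prefork, postfork, postfork);
}
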
@@ -18,15 +18,15 @@ rtree_new(rtree_t *rtree, bool zeroed) { #else assert(zeroed); #endif - + if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE, malloc_mutex_rank_exclusive)) { return true; - } - + } + return false; } - + static rtree_node_elm_t * rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms * @@ -82,22 +82,22 @@ rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree, rtree_leaf_dalloc(tsdn, rtree, leaf); } } - } - + } + if (subtree != rtree->root) { rtree_node_dalloc(tsdn, rtree, subtree); - } + } } # endif - + void rtree_delete(tsdn_t *tsdn, rtree_t *rtree) { # if RTREE_HEIGHT > 1 rtree_delete_subtree(tsdn, rtree, rtree->root, 0); # endif -} +} #endif - + static rtree_node_elm_t * rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, atomic_p_t *elmp) { @@ -121,10 +121,10 @@ rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, atomic_store_p(elmp, node, ATOMIC_RELEASE); } malloc_mutex_unlock(tsdn, &rtree->init_lock); - + return node; } - + static rtree_leaf_elm_t * rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) { malloc_mutex_lock(tsdn, &rtree->init_lock); @@ -139,32 +139,32 @@ rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) { if (leaf == NULL) { malloc_mutex_unlock(tsdn, &rtree->init_lock); return NULL; - } + } /* * Even though we hold the lock, a later reader might not; we * need release semantics. */ atomic_store_p(elmp, leaf, ATOMIC_RELEASE); - } + } malloc_mutex_unlock(tsdn, &rtree->init_lock); return leaf; -} - +} + static bool rtree_node_valid(rtree_node_elm_t *node) { return ((uintptr_t)node != (uintptr_t)0); } - + static bool rtree_leaf_valid(rtree_leaf_elm_t *leaf) { return ((uintptr_t)leaf != (uintptr_t)0); -} - +} + static rtree_node_elm_t * rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) { rtree_node_elm_t *node; - + if (dependent) { node = (rtree_node_elm_t *)atomic_load_p(&elm->child, ATOMIC_RELAXED); @@ -175,25 +175,25 @@ rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) { assert(!dependent || node != NULL); return node; -} - +} + static rtree_node_elm_t * rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, bool dependent) { rtree_node_elm_t *node; - + node = rtree_child_node_tryread(elm, dependent); if (!dependent && unlikely(!rtree_node_valid(node))) { node = rtree_node_init(tsdn, rtree, level + 1, &elm->child); } assert(!dependent || node != NULL); return node; -} - +} + static rtree_leaf_elm_t * rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) { rtree_leaf_elm_t *leaf; - + if (dependent) { leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, ATOMIC_RELAXED); @@ -204,7 +204,7 @@ rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) { assert(!dependent || leaf != NULL); return leaf; -} +} static rtree_leaf_elm_t * rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, diff --git a/contrib/libs/jemalloc/src/stats.c b/contrib/libs/jemalloc/src/stats.c index 118e05d291..e663f4238e 100644 --- a/contrib/libs/jemalloc/src/stats.c +++ b/contrib/libs/jemalloc/src/stats.c @@ -1,13 +1,13 @@ #define JEMALLOC_STATS_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" - + #include "jemalloc/internal/assert.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/emitter.h" #include "jemalloc/internal/mutex.h" #include 
"jemalloc/internal/mutex_prof.h" - + const char *global_mutex_names[mutex_prof_num_global_mutexes] = { #define OP(mtx) #mtx, MUTEX_PROF_GLOBAL_MUTEXES @@ -21,37 +21,37 @@ const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = { }; #define CTL_GET(n, v, t) do { \ - size_t sz = sizeof(t); \ + size_t sz = sizeof(t); \ xmallctl(n, (void *)v, &sz, NULL, 0); \ -} while (0) - +} while (0) + #define CTL_M2_GET(n, i, v, t) do { \ size_t mib[CTL_MAX_DEPTH]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ - size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ -} while (0) - +} while (0) + #define CTL_M2_M4_GET(n, i, j, v, t) do { \ size_t mib[CTL_MAX_DEPTH]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ - size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ mib[4] = (j); \ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ -} while (0) - -/******************************************************************************/ -/* Data. */ - +} while (0) + +/******************************************************************************/ +/* Data. */ + bool opt_stats_print = false; char opt_stats_print_opts[stats_print_tot_num_options+1] = ""; - -/******************************************************************************/ - + +/******************************************************************************/ + static uint64_t rate_per_second(uint64_t value, uint64_t uptime_ns) { uint64_t billion = 1000000000; @@ -65,7 +65,7 @@ rate_per_second(uint64_t value, uint64_t uptime_ns) { return value / uptime_s; } } - + /* Calculate x.yyy and output a string (takes a fixed sized char array). */ static bool get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) { @@ -76,7 +76,7 @@ get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) { if (dividend > 0) { assert(UINT64_MAX / dividend >= 1000); } - + unsigned n = (unsigned)((dividend * 1000) / divisor); if (n < 10) { malloc_snprintf(str, 6, "0.00%u", n); @@ -92,7 +92,7 @@ get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) { } #define MUTEX_CTL_STR_MAX_LENGTH 128 -static void +static void gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix, const char *mutex, const char *counter) { malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter); @@ -268,17 +268,17 @@ mutex_stats_emit(emitter_t *emitter, emitter_row_t *row, static void stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t uptime) { - size_t page; + size_t page; bool in_gap, in_gap_prev; unsigned nbins, j; - - CTL_GET("arenas.page", &page, size_t); - - CTL_GET("arenas.nbins", &nbins, unsigned); - + + CTL_GET("arenas.page", &page, size_t); + + CTL_GET("arenas.nbins", &nbins, unsigned); + emitter_row_t header_row; emitter_row_init(&header_row); - + emitter_row_t row; emitter_row_init(&row); @@ -423,10 +423,10 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti * no meaningful utilization can be computed. 
*/ malloc_snprintf(util, sizeof(util), " race"); - } else { + } else { not_reached(); - } - } + } + } col_size.size_val = reg_size; col_ind.unsigned_val = j; @@ -458,22 +458,22 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti */ emitter_table_row(emitter, &row); - } + } emitter_json_array_end(emitter); /* Close "bins". */ if (in_gap) { emitter_table_printf(emitter, " ---\n"); - } -} - -static void + } +} + +static void stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { unsigned nbins, nlextents, j; bool in_gap, in_gap_prev; - + CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("arenas.nlextents", &nlextents, unsigned); - + emitter_row_t header_row; emitter_row_init(&header_row); emitter_row_t row; @@ -497,9 +497,9 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { emitter_json_array_kv_begin(emitter, "lextents"); for (j = 0, in_gap = false; j < nlextents; j++) { - uint64_t nmalloc, ndalloc, nrequests; + uint64_t nmalloc, ndalloc, nrequests; size_t lextent_size, curlextents; - + CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j, &nmalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j, @@ -512,7 +512,7 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { if (in_gap_prev && !in_gap) { emitter_table_printf(emitter, " ---\n"); - } + } CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t); CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j, @@ -537,14 +537,14 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { if (!in_gap) { emitter_table_row(emitter, &row); } - } + } emitter_json_array_end(emitter); /* Close "lextents". */ if (in_gap) { emitter_table_printf(emitter, " ---\n"); } -} - -static void +} + +static void stats_arena_extents_print(emitter_t *emitter, unsigned i) { unsigned j; bool in_gap, in_gap_prev; @@ -660,36 +660,36 @@ stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptim static void stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, bool mutex, bool extents) { - unsigned nthreads; - const char *dss; + unsigned nthreads; + const char *dss; ssize_t dirty_decay_ms, muzzy_decay_ms; size_t page, pactive, pdirty, pmuzzy, mapped, retained; size_t base, internal, resident, metadata_thp, extent_avail; uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; - size_t small_allocated; + size_t small_allocated; uint64_t small_nmalloc, small_ndalloc, small_nrequests, small_nfills, small_nflushes; - size_t large_allocated; + size_t large_allocated; uint64_t large_nmalloc, large_ndalloc, large_nrequests, large_nfills, large_nflushes; size_t tcache_bytes, abandoned_vm; uint64_t uptime; - - CTL_GET("arenas.page", &page, size_t); - + + CTL_GET("arenas.page", &page, size_t); + CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); emitter_kv(emitter, "nthreads", "assigned threads", emitter_type_unsigned, &nthreads); - + CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t); emitter_kv(emitter, "uptime_ns", "uptime", emitter_type_uint64, &uptime); - + CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); emitter_kv(emitter, "dss", "dss allocation precedence", emitter_type_string, &dss); - + CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms, ssize_t); CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms, @@ -705,7 +705,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool 
bins, bool large, CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise, uint64_t); CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t); - + emitter_row_t decay_row; emitter_row_init(&decay_row); @@ -764,11 +764,11 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, } else { col_decay_time.type = emitter_type_title; col_decay_time.str_val = "N/A"; - } - + } + col_decay_npages.type = emitter_type_size; col_decay_npages.size_val = pdirty; - + col_decay_sweeps.type = emitter_type_uint64; col_decay_sweeps.uint64_val = dirty_npurge; @@ -789,22 +789,22 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, } else { col_decay_time.type = emitter_type_title; col_decay_time.str_val = "N/A"; - } - + } + col_decay_npages.type = emitter_type_size; col_decay_npages.size_val = pmuzzy; - + col_decay_sweeps.type = emitter_type_uint64; col_decay_sweeps.uint64_val = muzzy_npurge; - + col_decay_madvises.type = emitter_type_uint64; col_decay_madvises.uint64_val = muzzy_nmadvise; - + col_decay_purged.type = emitter_type_uint64; col_decay_purged.uint64_val = muzzy_purged; - + emitter_table_row(emitter, &decay_row); - + /* Small / large / total allocation counts. */ emitter_row_t alloc_count_row; emitter_row_init(&alloc_count_row); @@ -1102,33 +1102,33 @@ stats_general_print(emitter_t *emitter) { #undef OPT_WRITE #undef OPT_WRITE_MUTABLE -#undef OPT_WRITE_BOOL +#undef OPT_WRITE_BOOL #undef OPT_WRITE_BOOL_MUTABLE #undef OPT_WRITE_UNSIGNED -#undef OPT_WRITE_SSIZE_T +#undef OPT_WRITE_SSIZE_T #undef OPT_WRITE_SSIZE_T_MUTABLE -#undef OPT_WRITE_CHAR_P - +#undef OPT_WRITE_CHAR_P + /* prof. */ if (config_prof) { emitter_dict_begin(emitter, "prof", "Profiling settings"); - + CTL_GET("prof.thread_active_init", &bv, bool); emitter_kv(emitter, "thread_active_init", "prof.thread_active_init", emitter_type_bool, &bv); - + CTL_GET("prof.active", &bv, bool); emitter_kv(emitter, "active", "prof.active", emitter_type_bool, &bv); - + CTL_GET("prof.gdump", &bv, bool); emitter_kv(emitter, "gdump", "prof.gdump", emitter_type_bool, &bv); - + CTL_GET("prof.interval", &u64v, uint64_t); emitter_kv(emitter, "interval", "prof.interval", emitter_type_uint64, &u64v); - + CTL_GET("prof.lg_sample", &ssv, ssize_t); emitter_kv(emitter, "lg_sample", "prof.lg_sample", emitter_type_ssize, &ssv); @@ -1203,10 +1203,10 @@ stats_general_print(emitter_t *emitter) { &u32v); emitter_json_object_end(emitter); - } + } emitter_json_array_end(emitter); /* Close "bin". */ } - + unsigned nlextents; CTL_GET("arenas.nlextents", &nlextents, unsigned); emitter_kv(emitter, "nlextents", "Number of large size classes", @@ -1222,13 +1222,13 @@ stats_general_print(emitter_t *emitter) { &sv); emitter_json_object_end(emitter); - } + } emitter_json_array_end(emitter); /* Close "lextent". 
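
The CTL_GET family used throughout these printers wraps jemalloc's public mallctl() interface. A sketch of what an expansion amounts to, runnable when linked against an installed jemalloc; writing "epoch" first refreshes the stats snapshot:

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    mallctl("epoch", &epoch, &sz, &epoch, sz); /* refresh the snapshot */

    size_t allocated;
    sz = sizeof(allocated);
    if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
        printf("stats.allocated = %zu\n", allocated);
    }
    return 0;
}
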
*/ - } - + } + emitter_json_object_end(emitter); /* Close "arenas" */ } - + static void stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, bool unmerged, bool bins, bool large, bool mutex, bool extents) { @@ -1240,7 +1240,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, retained; size_t num_background_threads; uint64_t background_thread_num_runs, background_thread_run_interval; - + CTL_GET("stats.allocated", &allocated, size_t); CTL_GET("stats.active", &active, size_t); CTL_GET("stats.metadata", &metadata, size_t); @@ -1248,7 +1248,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, CTL_GET("stats.resident", &resident, size_t); CTL_GET("stats.mapped", &mapped, size_t); CTL_GET("stats.retained", &retained, size_t); - + if (have_background_thread) { CTL_GET("stats.background_thread.num_threads", &num_background_threads, size_t); @@ -1261,7 +1261,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, background_thread_num_runs = 0; background_thread_run_interval = 0; } - + /* Generic global stats. */ emitter_json_object_kv_begin(emitter, "stats"); emitter_json_kv(emitter, "allocated", emitter_type_size, &allocated); @@ -1272,12 +1272,12 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, emitter_json_kv(emitter, "resident", emitter_type_size, &resident); emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped); emitter_json_kv(emitter, "retained", emitter_type_size, &retained); - + emitter_table_printf(emitter, "Allocated: %zu, active: %zu, " "metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, " "retained: %zu\n", allocated, active, metadata, metadata_thp, resident, mapped, retained); - + /* Background thread stats. */ emitter_json_object_kv_begin(emitter, "background_thread"); emitter_json_kv(emitter, "num_threads", emitter_type_size, @@ -1287,7 +1287,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, emitter_json_kv(emitter, "run_interval", emitter_type_uint64, &background_thread_run_interval); emitter_json_object_end(emitter); /* Close "background_thread". */ - + emitter_table_printf(emitter, "Background threads: %zu, " "num_runs: %"FMTu64", run_interval: %"FMTu64" ns\n", num_background_threads, background_thread_num_runs, @@ -1314,18 +1314,18 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, emitter_json_object_kv_begin(emitter, global_mutex_names[i]); mutex_stats_emit(emitter, &row, col64, col32); emitter_json_object_end(emitter); - } - + } + emitter_json_object_end(emitter); /* Close "mutexes". */ } - + emitter_json_object_end(emitter); /* Close "stats". */ - + if (merged || destroyed || unmerged) { unsigned narenas; - + emitter_json_object_kv_begin(emitter, "stats.arenas"); - + CTL_GET("arenas.narenas", &narenas, unsigned); size_t mib[3]; size_t miblen = sizeof(mib) / sizeof(size_t); @@ -1385,12 +1385,12 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, large, mutex, extents); /* Close "<arena-ind>". */ emitter_json_object_end(emitter); - } - } - } + } + } + } emitter_json_object_end(emitter); /* Close "stats.arenas". 
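
CTL_M2_GET above avoids re-parsing the control name per arena by translating it to a MIB once and patching the index slot. The public-API equivalent, again assuming a jemalloc link:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void) {
    unsigned narenas;
    size_t sz = sizeof(narenas);
    mallctl("arenas.narenas", &narenas, &sz, NULL, 0);

    size_t mib[4];
    size_t miblen = sizeof(mib) / sizeof(size_t);
    mallctlnametomib("stats.arenas.0.nthreads", mib, &miblen);

    for (unsigned i = 0; i < narenas; i++) {
        unsigned nthreads;
        sz = sizeof(nthreads);
        mib[2] = i; /* patch the arena index into the MIB */
        if (mallctlbymib(mib, miblen, &nthreads, &sz, NULL, 0) == 0) {
            printf("arena %u: %u threads\n", i, nthreads);
        }
    }
    return 0;
}
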
*/ - } -} + } +} void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, diff --git a/contrib/libs/jemalloc/src/tcache.c b/contrib/libs/jemalloc/src/tcache.c index 50099a9f2c..9bcd32ab4a 100644 --- a/contrib/libs/jemalloc/src/tcache.c +++ b/contrib/libs/jemalloc/src/tcache.c @@ -1,24 +1,24 @@ #define JEMALLOC_TCACHE_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" - + #include "jemalloc/internal/assert.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/safety_check.h" #include "jemalloc/internal/sc.h" -/******************************************************************************/ -/* Data. */ - -bool opt_tcache = true; -ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; - +/******************************************************************************/ +/* Data. */ + +bool opt_tcache = true; +ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; + cache_bin_info_t *tcache_bin_info; -static unsigned stack_nelms; /* Total stack elms per tcache. */ - +static unsigned stack_nelms; /* Total stack elms per tcache. */ + unsigned nhbins; -size_t tcache_maxclass; - +size_t tcache_maxclass; + tcaches_t *tcaches; /* Index of first element within tcaches that has never been used. */ @@ -30,27 +30,27 @@ static tcaches_t *tcaches_avail; /* Protects tcaches{,_past,_avail}. */ static malloc_mutex_t tcaches_mtx; -/******************************************************************************/ - +/******************************************************************************/ + size_t tcache_salloc(tsdn_t *tsdn, const void *ptr) { return arena_salloc(tsdn, ptr); -} - -void +} + +void tcache_event_hard(tsd_t *tsd, tcache_t *tcache) { szind_t binind = tcache->next_gc_bin; - + cache_bin_t *tbin; if (binind < SC_NBINS) { tbin = tcache_small_bin_get(tcache, binind); } else { tbin = tcache_large_bin_get(tcache, binind); } - if (tbin->low_water > 0) { - /* - * Flush (ceiling) 3/4 of the objects below the low water mark. - */ + if (tbin->low_water > 0) { + /* + * Flush (ceiling) 3/4 of the objects below the low water mark. + */ if (binind < SC_NBINS) { tcache_bin_flush_small(tsd, tcache, tbin, binind, tbin->ncached - tbin->low_water + (tbin->low_water @@ -64,43 +64,43 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) { (tcache->lg_fill_div[binind] + 1)) >= 1) { tcache->lg_fill_div[binind]++; } - } else { + } else { tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached - tbin->low_water + (tbin->low_water >> 2), tcache); - } - } else if (tbin->low_water < 0) { - /* + } + } else if (tbin->low_water < 0) { + /* * Increase fill count by 2X for small bins. Make sure * lg_fill_div stays greater than 0. - */ + */ if (binind < SC_NBINS && tcache->lg_fill_div[binind] > 1) { tcache->lg_fill_div[binind]--; } - } - tbin->low_water = tbin->ncached; - - tcache->next_gc_bin++; + } + tbin->low_water = tbin->ncached; + + tcache->next_gc_bin++; if (tcache->next_gc_bin == nhbins) { - tcache->next_gc_bin = 0; + tcache->next_gc_bin = 0; } -} - -void * +} + +void * tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, bool *tcache_success) { - void *ret; - + void *ret; + assert(tcache->arena != NULL); arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind, - config_prof ? tcache->prof_accumbytes : 0); + config_prof ? 
tcache->prof_accumbytes : 0); if (config_prof) { - tcache->prof_accumbytes = 0; + tcache->prof_accumbytes = 0; } ret = cache_bin_alloc_easy(tbin, tcache_success); - + return ret; -} - +} + /* Enabled with --enable-extra-size-check. */ static void tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind, @@ -130,14 +130,14 @@ tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind, } } -void +void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, unsigned rem) { - bool merged_stats = false; - + bool merged_stats = false; + assert(binind < SC_NBINS); assert((cache_bin_sz_t)rem <= tbin->ncached); - + arena_t *arena = tcache->arena; assert(arena != NULL); unsigned nflush = tbin->ncached - rem; @@ -154,7 +154,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, } } while (nflush > 0) { - /* Lock the arena bin associated with the first object. */ + /* Lock the arena bin associated with the first object. */ extent_t *extent = item_extent[0]; unsigned bin_arena_ind = extent_arena_ind_get(extent); arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind, @@ -162,22 +162,22 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, unsigned binshard = extent_binshard_get(extent); assert(binshard < bin_infos[binind].n_shards); bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard]; - + if (config_prof && bin_arena == arena) { if (arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes)) { prof_idump(tsd_tsdn(tsd)); } - tcache->prof_accumbytes = 0; - } - + tcache->prof_accumbytes = 0; + } + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); if (config_stats && bin_arena == arena && !merged_stats) { - merged_stats = true; - bin->stats.nflushes++; - bin->stats.nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } + merged_stats = true; + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } unsigned ndeferred = 0; for (unsigned i = 0; i < nflush; i++) { void *ptr = *(tbin->avail - 1 - i); @@ -188,52 +188,52 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, && extent_binshard_get(extent) == binshard) { arena_dalloc_bin_junked_locked(tsd_tsdn(tsd), bin_arena, bin, binind, extent, ptr); - } else { - /* - * This object was allocated via a different - * arena bin than the one that is currently - * locked. Stash the object, so that it can be - * handled in a future pass. - */ + } else { + /* + * This object was allocated via a different + * arena bin than the one that is currently + * locked. Stash the object, so that it can be + * handled in a future pass. + */ *(tbin->avail - 1 - ndeferred) = ptr; item_extent[ndeferred] = extent; - ndeferred++; - } - } + ndeferred++; + } + } malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred); nflush = ndeferred; - } + } if (config_stats && !merged_stats) { - /* - * The flush loop didn't happen to flush to this thread's - * arena, so the stats didn't get merged. Manually do so now. - */ + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. 
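
The GC in tcache_event_hard() above flushes (ceiling) three quarters of the objects sitting below the low-water mark; the arithmetic is worth seeing with concrete numbers:

#include <stdio.h>

int
main(void) {
    int ncached = 10;  /* objects currently cached */
    int low_water = 8; /* minimum seen since the last GC pass */

    int rem = ncached - low_water + (low_water >> 2); /* keep 4 */
    int flushed = ncached - rem;                      /* evict 6 */

    /* 6 == 3/4 of the low-water mark of 8 */
    printf("keep %d, flush %d\n", rem, flushed);
    return 0;
}
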
+ */ unsigned binshard; bin_t *bin = arena_bin_choose_lock(tsd_tsdn(tsd), arena, binind, &binshard); - bin->stats.nflushes++; - bin->stats.nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); - } - + } + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * sizeof(void *)); - tbin->ncached = rem; + tbin->ncached = rem; if (tbin->ncached < tbin->low_water) { - tbin->low_water = tbin->ncached; + tbin->low_water = tbin->ncached; } -} - -void +} + +void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, unsigned rem, tcache_t *tcache) { - bool merged_stats = false; - - assert(binind < nhbins); + bool merged_stats = false; + + assert(binind < nhbins); assert((cache_bin_sz_t)rem <= tbin->ncached); - + arena_t *tcache_arena = tcache->arena; assert(tcache_arena != NULL); unsigned nflush = tbin->ncached - rem; @@ -249,15 +249,15 @@ tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, item_extent); #endif while (nflush > 0) { - /* Lock the arena associated with the first object. */ + /* Lock the arena associated with the first object. */ extent_t *extent = item_extent[0]; unsigned locked_arena_ind = extent_arena_ind_get(extent); arena_t *locked_arena = arena_get(tsd_tsdn(tsd), locked_arena_ind, false); bool idump; - + if (config_prof) { - idump = false; + idump = false; } bool lock_large = !arena_is_auto(locked_arena); @@ -275,19 +275,19 @@ tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, } if ((config_prof || config_stats) && (locked_arena == tcache_arena)) { - if (config_prof) { + if (config_prof) { idump = arena_prof_accum(tsd_tsdn(tsd), tcache_arena, tcache->prof_accumbytes); - tcache->prof_accumbytes = 0; - } - if (config_stats) { - merged_stats = true; + tcache->prof_accumbytes = 0; + } + if (config_stats) { + merged_stats = true; arena_stats_large_flush_nrequests_add( tsd_tsdn(tsd), &tcache_arena->stats, binind, tbin->tstats.nrequests); - tbin->tstats.nrequests = 0; - } - } + tbin->tstats.nrequests = 0; + } + } if (lock_large) { malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx); } @@ -301,53 +301,53 @@ tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, if (extent_arena_ind_get(extent) == locked_arena_ind) { large_dalloc_finish(tsd_tsdn(tsd), extent); } else { - /* - * This object was allocated via a different - * arena than the one that is currently locked. - * Stash the object, so that it can be handled - * in a future pass. - */ + /* + * This object was allocated via a different + * arena than the one that is currently locked. + * Stash the object, so that it can be handled + * in a future pass. + */ *(tbin->avail - 1 - ndeferred) = ptr; item_extent[ndeferred] = extent; - ndeferred++; - } - } + ndeferred++; + } + } if (config_prof && idump) { prof_idump(tsd_tsdn(tsd)); } arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush - ndeferred); nflush = ndeferred; - } + } if (config_stats && !merged_stats) { - /* - * The flush loop didn't happen to flush to this thread's - * arena, so the stats didn't get merged. Manually do so now. - */ + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. 
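
Both flush routines above share one pattern: each pass locks whichever arena owns the first remaining object, frees everything that arena owns, and stashes the rest for a later pass. The skeleton, with no-op stand-ins for the locking and dalloc calls:

#include <stddef.h>

typedef struct { int owner; } obj_t;

static void lock_owner(int o)     { (void)o; /* take owner's bin lock */ }
static void unlock_owner(int o)   { (void)o; }
static void free_locked(obj_t *p) { (void)p; /* dalloc under the lock */ }

void
flush_all(obj_t **items, size_t n) {
    while (n > 0) {
        int owner = items[0]->owner;
        lock_owner(owner);
        size_t ndeferred = 0;
        for (size_t i = 0; i < n; i++) {
            if (items[i]->owner == owner) {
                free_locked(items[i]);
            } else {
                /* Foreign object: stash it for a later pass. */
                items[ndeferred++] = items[i];
            }
        }
        unlock_owner(owner);
        n = ndeferred;
    }
}
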
+ */ arena_stats_large_flush_nrequests_add(tsd_tsdn(tsd), &tcache_arena->stats, binind, tbin->tstats.nrequests); - tbin->tstats.nrequests = 0; - } - + tbin->tstats.nrequests = 0; + } + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * sizeof(void *)); - tbin->ncached = rem; + tbin->ncached = rem; if (tbin->ncached < tbin->low_water) { - tbin->low_water = tbin->ncached; + tbin->low_water = tbin->ncached; } -} - -void +} + +void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { assert(tcache->arena == NULL); tcache->arena = arena; - - if (config_stats) { - /* Link into list of extant tcaches. */ + + if (config_stats) { + /* Link into list of extant tcaches. */ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); - ql_elm_new(tcache, link); - ql_tail_insert(&arena->tcache_ql, tcache, link); + ql_elm_new(tcache, link); + ql_tail_insert(&arena->tcache_ql, tcache, link); cache_bin_array_descriptor_init( &tcache->cache_bin_array_descriptor, tcache->bins_small, tcache->bins_large); @@ -355,15 +355,15 @@ tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { &tcache->cache_bin_array_descriptor, link); malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); - } -} - + } +} + static void tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) { arena_t *arena = tcache->arena; assert(arena != NULL); - if (config_stats) { - /* Unlink from list of extant tcaches. */ + if (config_stats) { + /* Unlink from list of extant tcaches. */ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); if (config_debug) { bool in_ql = false; @@ -381,10 +381,10 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) { &tcache->cache_bin_array_descriptor, link); tcache_stats_merge(tsdn, tcache, arena); malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); - } + } tcache->arena = NULL; -} - +} + void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { tcache_arena_dissociate(tsdn, tcache); @@ -482,72 +482,72 @@ tsd_tcache_data_init(tsd_t *tsd) { } /* Created manual tcache for tcache.create mallctl. */ -tcache_t * +tcache_t * tcache_create_explicit(tsd_t *tsd) { - tcache_t *tcache; - size_t size, stack_offset; - + tcache_t *tcache; + size_t size, stack_offset; + size = sizeof(tcache_t); - /* Naturally align the pointer stacks. */ - size = PTR_CEILING(size); - stack_offset = size; - size += stack_nelms * sizeof(void *); + /* Naturally align the pointer stacks. */ + size = PTR_CEILING(size); + stack_offset = size; + size += stack_nelms * sizeof(void *); /* Avoid false cacheline sharing. 
*/ size = sz_sa2u(size, CACHELINE); - + tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true, arena_get(TSDN_NULL, 0, true)); if (tcache == NULL) { return NULL; - } - + } + tcache_init(tsd, tcache, (void *)((uintptr_t)tcache + (uintptr_t)stack_offset)); tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL)); - + return tcache; -} - +} + static void tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) { assert(tcache->arena != NULL); - + for (unsigned i = 0; i < SC_NBINS; i++) { cache_bin_t *tbin = tcache_small_bin_get(tcache, i); tcache_bin_flush_small(tsd, tcache, tbin, i, 0); - + if (config_stats) { assert(tbin->tstats.nrequests == 0); - } - } + } + } for (unsigned i = SC_NBINS; i < nhbins; i++) { cache_bin_t *tbin = tcache_large_bin_get(tcache, i); tcache_bin_flush_large(tsd, tbin, i, 0, tcache); - + if (config_stats) { assert(tbin->tstats.nrequests == 0); - } - } - - if (config_prof && tcache->prof_accumbytes > 0 && + } + } + + if (config_prof && tcache->prof_accumbytes > 0 && arena_prof_accum(tsd_tsdn(tsd), tcache->arena, tcache->prof_accumbytes)) { prof_idump(tsd_tsdn(tsd)); } } - + void tcache_flush(tsd_t *tsd) { assert(tcache_available(tsd)); tcache_flush_cache(tsd, tsd_tcachep_get(tsd)); } - + static void tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) { tcache_flush_cache(tsd, tcache); arena_t *arena = tcache->arena; tcache_arena_dissociate(tsd_tsdn(tsd), tcache); - + if (tsd_tcache) { /* Release the avail array for the TSD embedded auto tcache. */ void *avail_array = @@ -575,10 +575,10 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) { } else { arena_decay(tsd_tsdn(tsd), arena, false, false); } -} - +} + /* For auto tcache (embedded in TSD) only. */ -void +void tcache_cleanup(tsd_t *tsd) { tcache_t *tcache = tsd_tcachep_get(tsd); if (!tcache_available(tsd)) { @@ -590,37 +590,37 @@ tcache_cleanup(tsd_t *tsd) { } assert(tsd_tcache_enabled_get(tsd)); assert(tcache_small_bin_get(tcache, 0)->avail != NULL); - + tcache_destroy(tsd, tcache, true); if (config_debug) { tcache_small_bin_get(tcache, 0)->avail = NULL; - } -} - -void + } +} + +void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { - unsigned i; - - cassert(config_stats); - - /* Merge and reset tcache stats. */ + unsigned i; + + cassert(config_stats); + + /* Merge and reset tcache stats. */ for (i = 0; i < SC_NBINS; i++) { cache_bin_t *tbin = tcache_small_bin_get(tcache, i); unsigned binshard; bin_t *bin = arena_bin_choose_lock(tsdn, arena, i, &binshard); - bin->stats.nrequests += tbin->tstats.nrequests; + bin->stats.nrequests += tbin->tstats.nrequests; malloc_mutex_unlock(tsdn, &bin->lock); - tbin->tstats.nrequests = 0; - } - - for (; i < nhbins; i++) { + tbin->tstats.nrequests = 0; + } + + for (; i < nhbins; i++) { cache_bin_t *tbin = tcache_large_bin_get(tcache, i); arena_stats_large_flush_nrequests_add(tsdn, &arena->stats, i, tbin->tstats.nrequests); - tbin->tstats.nrequests = 0; - } -} - + tbin->tstats.nrequests = 0; + } +} + static bool tcaches_create_prep(tsd_t *tsd) { bool err; @@ -647,17 +647,17 @@ label_return: return err; } -bool +bool tcaches_create(tsd_t *tsd, unsigned *r_ind) { witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); - + bool err; - + if (tcaches_create_prep(tsd)) { err = true; goto label_return; } - + tcache_t *tcache = tcache_create_explicit(tsd); if (tcache == NULL) { err = true; @@ -746,53 +746,53 @@ tcache_boot(tsdn_t *tsdn) { nhbins = sz_size2index(tcache_maxclass) + 1; - /* Initialize tcache_bin_info. 
*/ + /* Initialize tcache_bin_info. */ tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins * sizeof(cache_bin_info_t), CACHELINE); if (tcache_bin_info == NULL) { return true; } - stack_nelms = 0; + stack_nelms = 0; unsigned i; for (i = 0; i < SC_NBINS; i++) { if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { - tcache_bin_info[i].ncached_max = + tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_SMALL_MIN; } else if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) { tcache_bin_info[i].ncached_max = (bin_infos[i].nregs << 1); - } else { - tcache_bin_info[i].ncached_max = - TCACHE_NSLOTS_SMALL_MAX; - } - stack_nelms += tcache_bin_info[i].ncached_max; - } - for (; i < nhbins; i++) { - tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; - stack_nelms += tcache_bin_info[i].ncached_max; - } - + } else { + tcache_bin_info[i].ncached_max = + TCACHE_NSLOTS_SMALL_MAX; + } + stack_nelms += tcache_bin_info[i].ncached_max; + } + for (; i < nhbins; i++) { + tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; + stack_nelms += tcache_bin_info[i].ncached_max; + } + return false; -} - +} + void tcache_prefork(tsdn_t *tsdn) { if (!config_prof && opt_tcache) { malloc_mutex_prefork(tsdn, &tcaches_mtx); } } - + void tcache_postfork_parent(tsdn_t *tsdn) { if (!config_prof && opt_tcache) { malloc_mutex_postfork_parent(tsdn, &tcaches_mtx); } } - + void tcache_postfork_child(tsdn_t *tsdn) { if (!config_prof && opt_tcache) { malloc_mutex_postfork_child(tsdn, &tcaches_mtx); } -} +} diff --git a/contrib/libs/jemalloc/src/tsd.c b/contrib/libs/jemalloc/src/tsd.c index a31f6b9698..e79c171b12 100644 --- a/contrib/libs/jemalloc/src/tsd.c +++ b/contrib/libs/jemalloc/src/tsd.c @@ -1,17 +1,17 @@ #define JEMALLOC_TSD_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" - + #include "jemalloc/internal/assert.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" -/******************************************************************************/ -/* Data. */ - -static unsigned ncleanups; -static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; - +/******************************************************************************/ +/* Data. */ + +static unsigned ncleanups; +static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; + /* TSD_INITIALIZER triggers "-Wmissing-field-initializer" */ JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS @@ -55,13 +55,13 @@ bool tsd_booted = false; JEMALLOC_DIAGNOSTIC_POP -/******************************************************************************/ - +/******************************************************************************/ + /* A list of all the tsds in the nominal state. */ typedef ql_head(tsd_t) tsd_list_t; static tsd_list_t tsd_nominal_tsds = ql_head_initializer(tsd_nominal_tsds); static malloc_mutex_t tsd_nominal_tsds_lock; - + /* How many slow-path-enabling features are turned on. 
*/ static atomic_u32_t tsd_global_slow_count = ATOMIC_INIT(0); @@ -82,8 +82,8 @@ tsd_in_nominal_list(tsd_t *tsd) { } malloc_mutex_unlock(TSDN_NULL, &tsd_nominal_tsds_lock); return found; -} - +} + static void tsd_add_nominal(tsd_t *tsd) { assert(!tsd_in_nominal_list(tsd)); @@ -121,7 +121,7 @@ tsd_force_recompute(tsdn_t *tsdn) { malloc_mutex_unlock(tsdn, &tsd_nominal_tsds_lock); } -void +void tsd_global_slow_inc(tsdn_t *tsdn) { atomic_fetch_add_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED); /* @@ -134,13 +134,13 @@ tsd_global_slow_inc(tsdn_t *tsdn) { */ tsd_force_recompute(tsdn); } - + void tsd_global_slow_dec(tsdn_t *tsdn) { atomic_fetch_sub_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED); /* See the note in ..._inc(). */ tsd_force_recompute(tsdn); -} - +} + static bool tsd_local_slow(tsd_t *tsd) { return !tsd_tcache_enabled_get(tsd) @@ -167,7 +167,7 @@ tsd_state_compute(tsd_t *tsd) { } } -void +void tsd_slow_update(tsd_t *tsd) { uint8_t old_state; do { @@ -176,7 +176,7 @@ tsd_slow_update(tsd_t *tsd) { ATOMIC_ACQUIRE); } while (old_state == tsd_state_nominal_recompute); } - + void tsd_state_set(tsd_t *tsd, uint8_t new_state) { /* Only the tsd module can change the state *to* recompute. */ @@ -213,8 +213,8 @@ tsd_state_set(tsd_t *tsd, uint8_t new_state) { tsd_slow_update(tsd); } } -} - +} + static bool tsd_data_init(tsd_t *tsd) { /* @@ -325,40 +325,40 @@ malloc_tsd_dalloc(void *wrapper) { a0dalloc(wrapper); } -#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) -#ifndef _WIN32 -JEMALLOC_EXPORT -#endif -void +#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) +#ifndef _WIN32 +JEMALLOC_EXPORT +#endif +void _malloc_thread_cleanup(void) { - bool pending[MALLOC_TSD_CLEANUPS_MAX], again; - unsigned i; - + bool pending[MALLOC_TSD_CLEANUPS_MAX], again; + unsigned i; + for (i = 0; i < ncleanups; i++) { - pending[i] = true; + pending[i] = true; } - - do { - again = false; - for (i = 0; i < ncleanups; i++) { - if (pending[i]) { - pending[i] = cleanups[i](); + + do { + again = false; + for (i = 0; i < ncleanups; i++) { + if (pending[i]) { + pending[i] = cleanups[i](); if (pending[i]) { - again = true; + again = true; } - } - } - } while (again); -} -#endif - -void + } + } + } while (again); +} +#endif + +void malloc_tsd_cleanup_register(bool (*f)(void)) { - assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); - cleanups[ncleanups] = f; - ncleanups++; -} - + assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); + cleanups[ncleanups] = f; + ncleanups++; +} + static void tsd_do_data_cleanup(tsd_t *tsd) { prof_tdata_cleanup(tsd); @@ -369,10 +369,10 @@ tsd_do_data_cleanup(tsd_t *tsd) { witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd)); } -void +void tsd_cleanup(void *arg) { tsd_t *tsd = (tsd_t *)arg; - + switch (tsd_state_get(tsd)) { case tsd_state_uninitialized: /* Do nothing. 
*/ @@ -418,7 +418,7 @@ tsd_t * malloc_tsd_boot0(void) { tsd_t *tsd; - ncleanups = 0; + ncleanups = 0; if (malloc_mutex_init(&tsd_nominal_tsds_lock, "tsd_nominal_tsds_lock", WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) { return NULL; @@ -429,8 +429,8 @@ malloc_tsd_boot0(void) { tsd = tsd_fetch(); *tsd_arenas_tdata_bypassp_get(tsd) = true; return tsd; -} - +} + void malloc_tsd_boot1(void) { tsd_boot1(); @@ -440,24 +440,24 @@ malloc_tsd_boot1(void) { *tsd_arenas_tdata_bypassp_get(tsd) = false; } -#ifdef _WIN32 -static BOOL WINAPI +#ifdef _WIN32 +static BOOL WINAPI _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { - switch (fdwReason) { -#ifdef JEMALLOC_LAZY_LOCK - case DLL_THREAD_ATTACH: - isthreaded = true; - break; -#endif - case DLL_THREAD_DETACH: - _malloc_thread_cleanup(); - break; - default: - break; - } + switch (fdwReason) { +#ifdef JEMALLOC_LAZY_LOCK + case DLL_THREAD_ATTACH: + isthreaded = true; + break; +#endif + case DLL_THREAD_DETACH: + _malloc_thread_cleanup(); + break; + default: + break; + } return true; -} - +} + /* * We need to be able to say "read" here (in the "pragma section"), but have * hooked "read". We won't read for the rest of the file, so we can get away @@ -467,51 +467,51 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { # undef read #endif -#ifdef _MSC_VER -# ifdef _M_IX86 -# pragma comment(linker, "/INCLUDE:__tls_used") +#ifdef _MSC_VER +# ifdef _M_IX86 +# pragma comment(linker, "/INCLUDE:__tls_used") # pragma comment(linker, "/INCLUDE:_tls_callback") -# else -# pragma comment(linker, "/INCLUDE:_tls_used") +# else +# pragma comment(linker, "/INCLUDE:_tls_used") # pragma comment(linker, "/INCLUDE:" STRINGIFY(tls_callback) ) -# endif -# pragma section(".CRT$XLY",long,read) -#endif -JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) +# endif +# pragma section(".CRT$XLY",long,read) +#endif +JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, - DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; -#endif - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -void * + DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; +#endif + +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +void * tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { - pthread_t self = pthread_self(); - tsd_init_block_t *iter; - - /* Check whether this thread has already inserted into the list. */ + pthread_t self = pthread_self(); + tsd_init_block_t *iter; + + /* Check whether this thread has already inserted into the list. */ malloc_mutex_lock(TSDN_NULL, &head->lock); - ql_foreach(iter, &head->blocks, link) { - if (iter->thread == self) { + ql_foreach(iter, &head->blocks, link) { + if (iter->thread == self) { malloc_mutex_unlock(TSDN_NULL, &head->lock); return iter->data; - } - } - /* Insert block into list. */ - ql_elm_new(block, link); - block->thread = self; - ql_tail_insert(&head->blocks, block, link); + } + } + /* Insert block into list. 
*/ + ql_elm_new(block, link); + block->thread = self; + ql_tail_insert(&head->blocks, block, link); malloc_mutex_unlock(TSDN_NULL, &head->lock); return NULL; -} - -void +} + +void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) { malloc_mutex_lock(TSDN_NULL, &head->lock); - ql_remove(&head->blocks, block, link); + ql_remove(&head->blocks, block, link); malloc_mutex_unlock(TSDN_NULL, &head->lock); -} -#endif +} +#endif void tsd_prefork(tsd_t *tsd) { diff --git a/contrib/libs/jemalloc/src/zone.c b/contrib/libs/jemalloc/src/zone.c index 23dfdd04a9..3a750c33e2 100644 --- a/contrib/libs/jemalloc/src/zone.c +++ b/contrib/libs/jemalloc/src/zone.c @@ -3,10 +3,10 @@ #include "jemalloc/internal/assert.h" -#ifndef JEMALLOC_ZONE -# error "This source file is for zones on Darwin (OS X)." -#endif - +#ifndef JEMALLOC_ZONE +# error "This source file is for zones on Darwin (OS X)." +#endif + /* Definitions of the following structs in malloc/malloc.h might be too old * for the built binary to run on newer versions of OSX. So use the newest * possible version of those structs. @@ -76,140 +76,140 @@ extern void malloc_zone_register(malloc_zone_t *zone); extern void malloc_zone_unregister(malloc_zone_t *zone); -/* +/* * The malloc_default_purgeable_zone() function is only available on >= 10.6. - * We need to check whether it is present at runtime, thus the weak_import. - */ -extern malloc_zone_t *malloc_default_purgeable_zone(void) -JEMALLOC_ATTR(weak_import); - -/******************************************************************************/ -/* Data. */ - + * We need to check whether it is present at runtime, thus the weak_import. + */ +extern malloc_zone_t *malloc_default_purgeable_zone(void) +JEMALLOC_ATTR(weak_import); + +/******************************************************************************/ +/* Data. */ + static malloc_zone_t *default_zone, *purgeable_zone; static malloc_zone_t jemalloc_zone; static struct malloc_introspection_t jemalloc_zone_introspect; static pid_t zone_force_lock_pid = -1; - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
*/ + static size_t zone_size(malloc_zone_t *zone, const void *ptr); -static void *zone_malloc(malloc_zone_t *zone, size_t size); -static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); -static void *zone_valloc(malloc_zone_t *zone, size_t size); -static void zone_free(malloc_zone_t *zone, void *ptr); -static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); -static void *zone_memalign(malloc_zone_t *zone, size_t alignment, - size_t size); -static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, - size_t size); +static void *zone_malloc(malloc_zone_t *zone, size_t size); +static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); +static void *zone_valloc(malloc_zone_t *zone, size_t size); +static void zone_free(malloc_zone_t *zone, void *ptr); +static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); +static void *zone_memalign(malloc_zone_t *zone, size_t alignment, + size_t size); +static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, + size_t size); static void zone_destroy(malloc_zone_t *zone); static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, unsigned num_requested); static void zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, unsigned num_to_be_freed); static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal); -static size_t zone_good_size(malloc_zone_t *zone, size_t size); +static size_t zone_good_size(malloc_zone_t *zone, size_t size); static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder); static boolean_t zone_check(malloc_zone_t *zone); static void zone_print(malloc_zone_t *zone, boolean_t verbose); static void zone_log(malloc_zone_t *zone, void *address); -static void zone_force_lock(malloc_zone_t *zone); -static void zone_force_unlock(malloc_zone_t *zone); +static void zone_force_lock(malloc_zone_t *zone); +static void zone_force_unlock(malloc_zone_t *zone); static void zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats); static boolean_t zone_locked(malloc_zone_t *zone); static void zone_reinit_lock(malloc_zone_t *zone); - -/******************************************************************************/ -/* - * Functions. - */ - -static size_t + +/******************************************************************************/ +/* + * Functions. + */ + +static size_t zone_size(malloc_zone_t *zone, const void *ptr) { - /* - * There appear to be places within Darwin (such as setenv(3)) that - * cause calls to this function with pointers that *no* zone owns. If - * we knew that all pointers were owned by *some* zone, we could split - * our zone into two parts, and use one as the default allocator and - * the other as the default deallocator/reallocator. Since that will - * not work in practice, we must check all pointers to assure that they + /* + * There appear to be places within Darwin (such as setenv(3)) that + * cause calls to this function with pointers that *no* zone owns. If + * we knew that all pointers were owned by *some* zone, we could split + * our zone into two parts, and use one as the default allocator and + * the other as the default deallocator/reallocator. Since that will + * not work in practice, we must check all pointers to assure that they * reside within a mapped extent before determining size. 
- */ + */ return ivsalloc(tsdn_fetch(), ptr); -} - -static void * +} + +static void * zone_malloc(malloc_zone_t *zone, size_t size) { return je_malloc(size); -} - -static void * +} + +static void * zone_calloc(malloc_zone_t *zone, size_t num, size_t size) { return je_calloc(num, size); -} - -static void * +} + +static void * zone_valloc(malloc_zone_t *zone, size_t size) { - void *ret = NULL; /* Assignment avoids useless compiler warning. */ - - je_posix_memalign(&ret, PAGE, size); - + void *ret = NULL; /* Assignment avoids useless compiler warning. */ + + je_posix_memalign(&ret, PAGE, size); + return ret; -} - -static void +} + +static void zone_free(malloc_zone_t *zone, void *ptr) { if (ivsalloc(tsdn_fetch(), ptr) != 0) { - je_free(ptr); - return; - } - - free(ptr); -} - -static void * + je_free(ptr); + return; + } + + free(ptr); +} + +static void * zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) { if (ivsalloc(tsdn_fetch(), ptr) != 0) { return je_realloc(ptr, size); } - + return realloc(ptr, size); -} - -static void * +} + +static void * zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) { - void *ret = NULL; /* Assignment avoids useless compiler warning. */ - - je_posix_memalign(&ret, alignment, size); - + void *ret = NULL; /* Assignment avoids useless compiler warning. */ + + je_posix_memalign(&ret, alignment, size); + return ret; -} - -static void +} + +static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) { size_t alloc_size; - + alloc_size = ivsalloc(tsdn_fetch(), ptr); if (alloc_size != 0) { assert(alloc_size == size); - je_free(ptr); - return; - } - - free(ptr); -} - + je_free(ptr); + return; + } + + free(ptr); +} + static void zone_destroy(malloc_zone_t *zone) { - /* This function should never be called. */ - not_reached(); -} - + /* This function should never be called. 
*/ + not_reached(); +} + static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, unsigned num_requested) { @@ -235,19 +235,19 @@ zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, } } -static size_t +static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) { return 0; } - + static size_t zone_good_size(malloc_zone_t *zone, size_t size) { if (size == 0) { - size = 1; + size = 1; } return sz_s2u(size); -} - +} + static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, @@ -260,10 +260,10 @@ zone_check(malloc_zone_t *zone) { return true; } -static void +static void zone_print(malloc_zone_t *zone, boolean_t verbose) { } - + static void zone_log(malloc_zone_t *zone, void *address) { } @@ -277,11 +277,11 @@ zone_force_lock(malloc_zone_t *zone) { */ assert(zone_force_lock_pid == -1); zone_force_lock_pid = getpid(); - jemalloc_prefork(); + jemalloc_prefork(); } -} - -static void +} + +static void zone_force_unlock(malloc_zone_t *zone) { /* * zone_force_lock and zone_force_unlock are the entry points to the @@ -305,7 +305,7 @@ zone_force_unlock(malloc_zone_t *zone) { zone_force_lock_pid = -1; } } - + static void zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { /* We make no effort to actually fill the values */ @@ -313,8 +313,8 @@ zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { stats->size_in_use = 0; stats->max_size_in_use = 0; stats->size_allocated = 0; -} - +} + static boolean_t zone_locked(malloc_zone_t *zone) { /* Pretend no lock is being held */ @@ -433,37 +433,37 @@ zone_promote(void) { } while (zone != &jemalloc_zone); } -JEMALLOC_ATTR(constructor) -void +JEMALLOC_ATTR(constructor) +void zone_register(void) { - /* - * If something else replaced the system default zone allocator, don't - * register jemalloc's. - */ + /* + * If something else replaced the system default zone allocator, don't + * register jemalloc's. + */ default_zone = zone_default_get(); if (!default_zone->zone_name || strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { - return; - } - - /* - * The default purgeable zone is created lazily by OSX's libc. It uses - * the default zone when it is created for "small" allocations - * (< 15 KiB), but assumes the default zone is a scalable_zone. This - * obviously fails when the default zone is the jemalloc zone, so + return; + } + + /* + * The default purgeable zone is created lazily by OSX's libc. It uses + * the default zone when it is created for "small" allocations + * (< 15 KiB), but assumes the default zone is a scalable_zone. This + * obviously fails when the default zone is the jemalloc zone, so * malloc_default_purgeable_zone() is called beforehand so that the - * default purgeable zone is created when the default zone is still - * a scalable_zone. As purgeable zones only exist on >= 10.6, we need - * to check for the existence of malloc_default_purgeable_zone() at - * run time. - */ + * default purgeable zone is created when the default zone is still + * a scalable_zone. As purgeable zones only exist on >= 10.6, we need + * to check for the existence of malloc_default_purgeable_zone() at + * run time. + */ purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : malloc_default_purgeable_zone(); - - /* Register the custom zone. At this point it won't be the default. */ + + /* Register the custom zone. At this point it won't be the default. 
*/ zone_init(); malloc_zone_register(&jemalloc_zone); - + /* Promote the custom zone to be default. */ zone_promote(); -} +} diff --git a/contrib/libs/jemalloc/ya.make b/contrib/libs/jemalloc/ya.make index 586de30ab0..2f616f62b0 100644 --- a/contrib/libs/jemalloc/ya.make +++ b/contrib/libs/jemalloc/ya.make @@ -1,6 +1,6 @@ # Generated by devtools/yamaker from nixpkgs 21.11. -LIBRARY() +LIBRARY() OWNER( g:contrib @@ -18,11 +18,11 @@ LICENSE( LICENSE_TEXTS(.yandex_meta/licenses.list.txt) -ADDINCL( +ADDINCL( contrib/libs/jemalloc/include contrib/libs/libunwind/include -) - +) + IF (OS_WINDOWS) ADDINCL( contrib/libs/jemalloc/include/msvc_compat @@ -50,42 +50,42 @@ NO_COMPILER_WARNINGS() NO_UTIL() -SRCS( +SRCS( hack.cpp - src/arena.c + src/arena.c src/background_thread.c - src/base.c + src/base.c src/bin.c - src/bitmap.c - src/ckh.c - src/ctl.c + src/bitmap.c + src/ckh.c + src/ctl.c src/div.c - src/extent.c + src/extent.c src/extent_dss.c src/extent_mmap.c - src/hash.c + src/hash.c src/hook.c - src/jemalloc.c + src/jemalloc.c src/jemalloc_cpp.cpp src/large.c src/log.c src/malloc_io.c - src/mutex.c + src/mutex.c src/mutex_pool.c src/nstime.c src/pages.c src/prng.c - src/prof.c - src/rtree.c + src/prof.c + src/rtree.c src/safety_check.c src/sc.c - src/stats.c + src/stats.c src/sz.c - src/tcache.c + src/tcache.c src/test_hooks.c src/ticker.c - src/tsd.c + src/tsd.c src/witness.c -) - -END() +) + +END() |
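
The tcache.c hunks above carry the plumbing behind the "tcache.create" mallctl ("Created manual tcache for tcache.create mallctl." -> tcaches_create() -> tcache_create_explicit()). A minimal sketch of exercising that path from jemalloc's public API; it assumes an unprefixed build that exports mallctl/mallocx/dallocx (with --with-jemalloc-prefix=je_ the same calls are je_mallctl() and so on):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        unsigned tci;
        size_t sz = sizeof(tci);

        /* "tcache.create" reaches tcaches_create() ->
         * tcache_create_explicit() in the diff above. */
        if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0) {
            fprintf(stderr, "tcache.create failed\n");
            return 1;
        }

        /* MALLOCX_TCACHE(tci) binds the allocation to the explicit
         * cache instead of the thread's automatic one. */
        void *p = mallocx(64, MALLOCX_TCACHE(tci));
        if (p == NULL) {
            return 1;
        }
        dallocx(p, MALLOCX_TCACHE(tci));

        /* "tcache.flush" drains the bins through the
         * tcache_bin_flush_small()/tcache_bin_flush_large() paths. */
        mallctl("tcache.flush", NULL, NULL, &tci, sizeof(tci));
        mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
        return 0;
    }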
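
The same flush machinery also backs the automatic per-thread cache that tcache_flush() and tcache_cleanup() manage; it is reachable through the thread.tcache.* mallctls. A small sketch under the same unprefixed-build assumption:

    #include <stdbool.h>
    #include <jemalloc/jemalloc.h>

    /* Flush, then disable, the calling thread's automatic tcache. */
    static void drop_thread_cache(void) {
        bool enabled = false;

        /* Ends up in tcache_flush() from the diff above. */
        mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);

        /* Disabling the cache also flips the per-thread slow path
         * that tsd_slow_update()/tsd_local_slow() recompute. */
        mallctl("thread.tcache.enabled", NULL, NULL, &enabled,
            sizeof(enabled));
    }

    int main(void) {
        drop_thread_cache();
        return 0;
    }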
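
The zone.c hunks exist so that, on Darwin, plain malloc()/free() calls are routed into jemalloc once zone_register() has promoted the custom zone to default. A macOS-only sketch for checking which zone ends up owning an allocation; it assumes the custom zone is named "jemalloc" (the name is assigned in this file's zone_init(), which lies outside the quoted hunks):

    #include <malloc/malloc.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        void *p = malloc(32);
        malloc_zone_t *z = malloc_zone_from_ptr(p);

        /* With jemalloc's zone registered and promoted, this is
         * expected to print "jemalloc". */
        printf("owning zone: %s\n",
            (z != NULL && z->zone_name != NULL) ? z->zone_name
            : "(unnamed)");
        free(p);
        return 0;
    }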